| code (string, 86–54.5k chars) | code_codestyle (int64, 0–371) | style_context (string, 87–49.2k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]
    def __init__(self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10,
                 fft_window_size=1024, padding_value=0.0, return_attention_mask=False,
                 frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None,
                 truncation: str = "fusion", padding: str = "repeatpad", **kwargs) -> None:
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value,
            return_attention_mask=return_attention_mask, **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size,
            min_frequency=frequency_min, max_frequency=frequency_max,
            sampling_rate=sampling_rate, norm=None, mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size,
            min_frequency=frequency_min, max_frequency=frequency_max,
            sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        )
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose an index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller
                    # than max_length + hop_length. In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer
    def __call__(self, raw_speech, truncation: str = None, padding: Optional[str] = None,
                 max_length: Optional[int] = None, sampling_rate: Optional[int] = None,
                 return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float32) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
| 276 |
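# Illustrative usage sketch (not part of the dataset row above): calling the
# feature extractor on raw audio. The defaults (48 kHz, "fusion"/"repeatpad")
# mirror __init__; the random waveform is a stand-in for a real clip.
if __name__ == "__main__":
    import numpy as np

    extractor = ClapFeatureExtractor()
    waveform = np.random.randn(3 * 48_000).astype(np.float32)  # ~3 s of mono audio
    features = extractor(waveform, sampling_rate=48_000, return_tensors="np")
    # "fusion" truncation stacks four mel views per clip
    print(features["input_features"].shape)
    print(features["is_longer"])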
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 249 | 0 |
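# Illustrative sketch (hypothetical values): the __init__ above flips
# deprecated "no_*" flags into their positive counterparts, so a call like
#
#     args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], no_cuda=True)
#
# sets args.cuda to False and emits a deprecation warning. The `models`
# argument is assumed to come from the parent BenchmarkArguments dataclass.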
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 323 |
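# Illustrative sketch (simplified stand-in, not the actual _LazyModule from
# transformers): the pattern above defers heavy framework imports until an
# exported symbol is first accessed.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        if attr in self._symbol_to_module:
            submodule = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
            return getattr(submodule, attr)
        raise AttributeError(f"module {self.__name__} has no attribute {attr}")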
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
| 323 | 1 |
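# Illustrative usage sketch: evaluate() interprets a restricted subset of
# Python against an explicit tool namespace and a mutable state dict, exactly
# as the tests above exercise.
if __name__ == "__main__":
    state = {"x": 3}
    result = evaluate("y = add_two(x)", {"add_two": add_two}, state=state)
    assert result == 5 and state == {"x": 3, "y": 5}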
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n    Examples:\n    ```py\n    >>> from PIL import Image\n    >>> import torch\n    >>> from diffusers import DiffusionPipeline\n    >>> from diffusers.utils import export_to_gif, load_image\n\n    >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n    >>> repo = \"openai/shap-e-img2img\"\n    >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n    >>> pipe = pipe.to(device)\n\n    >>> guidance_scale = 3.0\n    >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n    >>> image = load_image(image_url).convert(\"RGB\")\n\n    >>> images = pipe(\n    ...     image,\n    ...     guidance_scale=guidance_scale,\n    ...     num_inference_steps=64,\n    ...     frame_size=256,\n    ... ).images\n\n    >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n    ```\n"
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(self, prior, image_encoder, image_processor, scheduler, renderer):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)
        image = image.to(dtype=self.image_encoder.dtype, device=device)
        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])
        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image, num_images_per_prompt: int = 1, num_inference_steps: int = 25,
                 generator=None, latents=None, guidance_scale: float = 4.0, frame_size: int = 64,
                 output_type: str = "pil", return_dict: bool = True):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(scaled_model_input.shape[2], dim=2)  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)
        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)
        images = torch.stack(images)
        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]
        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images)
| 100 |
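# Note (illustrative): the guidance step inside the denoising loop above is a
# simple linear extrapolation from the unconditional toward the conditional
# prediction. In isolation:
#
#     def classifier_free_guidance(uncond, cond, guidance_scale):
#         # guidance_scale > 1.0 pushes the result past the conditional branch
#         return uncond + guidance_scale * (cond - uncond)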
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def _lowerCAmelCase ( UpperCamelCase_ = "AAPL" ):
__SCREAMING_SNAKE_CASE = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
__SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(UpperCamelCase_ ).text , """html.parser""" )
__SCREAMING_SNAKE_CASE = """My(6px) Pos(r) smartphone_Mt(6px)"""
return soup.find("""div""" , class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 100 | 1 |
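# Defensive variant (illustrative): the scraper depends on a Yahoo-specific
# CSS class that can change without notice, so fail soft when it does.
def safe_stock_price(symbol: str = "AAPL") -> str:
    try:
        return stock_price(symbol)
    except AttributeError:  # soup.find(...) returned None: page layout changed
        return "N/A"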
from math import ceil


def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
| 355 |
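# Illustrative usage sketch: spreading 12 layers across two devices.
if __name__ == "__main__":
    device_map = get_device_map(n_layers=12, devices=[0, 1])
    print(device_map)  # {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}
    assert_device_map(device_map, num_blocks=12)  # raises if blocks are missing or duplicated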
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
| 235 | 0 |
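# Illustrative check: rename_key() collapses "module.<index>" segments into
# Flax-style "module_<index>" names; it is a pure function, easy to verify:
if __name__ == "__main__":
    assert rename_key("down_blocks.0.resnets.1.conv1.weight") == "down_blocks_0.resnets_1.conv1.weight"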
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0,
                 attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06,
                 image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 101 |
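# Illustrative usage sketch: instantiating the config with defaults and a
# couple of overrides, as with any PretrainedConfig subclass.
#
#     config = ViTMSNConfig(image_size=192, patch_size=8)
#     print(config.hidden_size, config.image_size, config.patch_size)  # 768 192 8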
from __future__ import annotations

from typing import Any


class CircularQueueLinkedList:
    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            current_node.prev = previous_node
            previous_node.next = current_node
            previous_node = current_node
        # close the ring: the last node points back to the first
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 147 | 0 |
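# Illustrative usage sketch: capacity is fixed at construction, and
# enqueue/dequeue only move the front/rear pointers over pre-allocated nodes.
if __name__ == "__main__":
    queue = CircularQueueLinkedList(initial_capacity=3)
    queue.enqueue("a")
    queue.enqueue("b")
    print(queue.first())    # a
    print(queue.dequeue())  # a
    print(queue.dequeue())  # b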
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0,
                 eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024,
                 tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50,
                 position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 360 |
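# Illustrative usage sketch: MarkupLM extends a BERT-style config with XPath
# embedding sizes, visible directly on the instance.
#
#     config = MarkupLMConfig()
#     print(config.max_depth, config.xpath_unit_hidden_size)  # 50 32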
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        "Start the menu and return the selected choice"
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
| 207 | 0 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]
        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])
        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])
        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])
        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)
        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])
        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])
        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])
        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()
        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])
        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])
        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 293 |
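# Illustrative usage sketch: the helper aligns out_features / out_indices with
# a backbone's stage names, defaulting to the last stage when both are None.
if __name__ == "__main__":
    features, indices = get_aligned_output_features_output_indices(None, None, ["stem", "stage1", "stage2"])
    print(features, indices)  # ['stage2'] [2]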
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 293 | 1 |
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 317 |
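# Illustrative usage sketch (requires network access): the inspected metadata
# mirrors what the parametrized tests above assert, e.g.:
#
#     from datasets import get_dataset_split_names
#     get_dataset_split_names("squad", config_name="plain_text")  # ['train', 'validation']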
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
SCREAMING_SNAKE_CASE : Any = (7_2_0, 1_2_8_0) # Height, Width
SCREAMING_SNAKE_CASE : List[str] = (0.4, 0.6) # if height or width lower than this scale, drop it.
SCREAMING_SNAKE_CASE : List[Any] = 1 / 1_0_0
SCREAMING_SNAKE_CASE : Optional[Any] = ''''''
SCREAMING_SNAKE_CASE : Dict = ''''''
SCREAMING_SNAKE_CASE : List[Any] = ''''''
SCREAMING_SNAKE_CASE : Dict = 2_5_0
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = get_dataset(snake_case_ , snake_case_ )
for index in range(snake_case_ ):
_lowerCAmelCase = random.sample(range(len(snake_case_ ) ) , 4 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = update_image_and_anno(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , filter_scale=snake_case_ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_lowerCAmelCase = random_chars(32 )
_lowerCAmelCase = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
_lowerCAmelCase = F"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(F"""{file_root}.jpg""" , snake_case_ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
_lowerCAmelCase = []
for anno in new_annos:
_lowerCAmelCase = anno[3] - anno[1]
_lowerCAmelCase = anno[4] - anno[2]
_lowerCAmelCase = anno[1] + width / 2
_lowerCAmelCase = anno[2] + height / 2
_lowerCAmelCase = F"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(snake_case_ )
with open(F"""{file_root}.txt""" , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def __UpperCAmelCase ( snake_case_ : str , snake_case_ : str ) -> tuple[list, list]:
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase = []
for label_file in glob.glob(os.path.join(snake_case_ , """*.txt""" ) ):
_lowerCAmelCase = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(snake_case_ ) as in_file:
_lowerCAmelCase = in_file.readlines()
_lowerCAmelCase = os.path.join(snake_case_ , F"""{label_name}.jpg""" )
_lowerCAmelCase = []
for obj_list in obj_lists:
_lowerCAmelCase = obj_list.rstrip("""\n""" ).split(""" """ )
_lowerCAmelCase = float(obj[1] ) - float(obj[3] ) / 2
_lowerCAmelCase = float(obj[2] ) - float(obj[4] ) / 2
_lowerCAmelCase = float(obj[1] ) + float(obj[3] ) / 2
_lowerCAmelCase = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(snake_case_ )
labels.append(snake_case_ )
return img_paths, labels
def __UpperCAmelCase ( snake_case_ : list , snake_case_ : list , snake_case_ : list[int] , snake_case_ : tuple[int, int] , snake_case_ : tuple[float, float] , snake_case_ : float = 0.0 , ) -> tuple[list, list, str]:
"""simple docstring"""
_lowerCAmelCase = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
_lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_lowerCAmelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_lowerCAmelCase = int(scale_x * output_size[1] )
_lowerCAmelCase = int(scale_y * output_size[0] )
_lowerCAmelCase = []
_lowerCAmelCase = []
for i, index in enumerate(snake_case_ ):
_lowerCAmelCase = all_img_list[index]
path_list.append(snake_case_ )
_lowerCAmelCase = all_annos[index]
_lowerCAmelCase = cva.imread(snake_case_ )
if i == 0: # top-left
_lowerCAmelCase = cva.resize(snake_case_ , (divid_point_x, divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = bbox[1] * scale_x
_lowerCAmelCase = bbox[2] * scale_y
_lowerCAmelCase = bbox[3] * scale_x
_lowerCAmelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_lowerCAmelCase = cva.resize(snake_case_ , (output_size[1] - divid_point_x, divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x)
_lowerCAmelCase = bbox[2] * scale_y
_lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x)
_lowerCAmelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_lowerCAmelCase = cva.resize(snake_case_ , (divid_point_x, output_size[0] - divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = bbox[1] * scale_x
_lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y)
_lowerCAmelCase = bbox[3] * scale_x
_lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_lowerCAmelCase = cva.resize(
snake_case_ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_lowerCAmelCase = img
for bbox in img_annos:
_lowerCAmelCase = scale_x + bbox[1] * (1 - scale_x)
_lowerCAmelCase = scale_y + bbox[2] * (1 - scale_y)
_lowerCAmelCase = scale_x + bbox[3] * (1 - scale_x)
_lowerCAmelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
_lowerCAmelCase = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def __UpperCAmelCase ( snake_case_ : int ) -> str:
"""simple docstring"""
assert number_char > 1, "The number of characters should be greater than 1"
_lowerCAmelCase = ascii_lowercase + digits
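# Candidate pool is lowercase letters plus digits, so e.g. number_char=32 can yield a
# 32-character code such as '7b7ad245cdff75241935e4dd860f3bad'.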
return "".join(random.choice(snake_case_ ) for _ in range(snake_case_ ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 317 | 1 |
'''simple docstring'''
from typing import Any
class __UpperCamelCase :
def __init__( self , __a ):
'''simple docstring'''
__a : Optional[int] = data
__a : List[Any] = None
def __repr__( self ):
'''simple docstring'''
return f"""Node({self.data})"""
class __UpperCamelCase :
def __init__( self ):
'''simple docstring'''
__a : Optional[Any] = None
def __iter__( self ):
'''simple docstring'''
__a : List[str] = self.head
while node:
yield node.data
__a : Union[str, Any] = node.next
def __len__( self ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self ):
'''simple docstring'''
return "->".join([str(__a ) for item in self] )
def __getitem__( self , __a ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise IndexError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , __a , __a ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise IndexError('list index out of range.' )
__a : Any = self.head
for _ in range(__a ):
__a : int = current.next
__a : str = data
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
self.insert_nth(len(self ) , __a )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
self.insert_nth(0 , __a )
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
__a : List[Any] = Node(__a )
if self.head is None:
__a : Optional[Any] = new_node
elif index == 0:
__a : Optional[Any] = self.head # link new_node to head
__a : Union[str, Any] = new_node
else:
__a : Any = self.head
for _ in range(index - 1 ):
__a : Optional[int] = temp.next
__a : List[Any] = temp.next
__a : List[str] = new_node
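# Splice: walk to the node just before `index`, then rewire it to point at new_node,
# with new_node taking over the old successor.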
def __UpperCAmelCase ( self ): # print every node data
'''simple docstring'''
print(self )
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.delete_nth(0 )
def __UpperCAmelCase ( self ): # delete from tail
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def __UpperCAmelCase ( self , __a = 0 ):
'''simple docstring'''
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
__a : Optional[int] = self.head # default first node
if index == 0:
__a : Optional[Any] = self.head.next
else:
__a : int = self.head
for _ in range(index - 1 ):
__a : Any = temp.next
__a : Any = temp.next
__a : Any = temp.next.next
return delete_node.data
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.head is None
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = None
__a : Dict = self.head
while current:
# Store the current node's next node.
__a : Optional[int] = current.next
# Make the current node's next point backwards
__a : Any = prev
# Make the previous node be the current node
__a : str = current
# Make the current node the next node (to progress iteration)
__a : Dict = next_node
# Return prev in order to put the head at the end
__a : Tuple = prev
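# reverse() runs in O(n) time and O(1) extra space: e.g. 1->2->3 becomes 3->2->1.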
def lowerCamelCase ():
__a : Tuple = LinkedList()
assert linked_list.is_empty() is True
assert str(_SCREAMING_SNAKE_CASE ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(_SCREAMING_SNAKE_CASE ) == i
linked_list.insert_nth(_SCREAMING_SNAKE_CASE , i + 1 )
assert str(_SCREAMING_SNAKE_CASE ) == "->".join(str(_SCREAMING_SNAKE_CASE ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(_SCREAMING_SNAKE_CASE ) == "->".join(str(_SCREAMING_SNAKE_CASE ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(_SCREAMING_SNAKE_CASE ) == 9
assert str(_SCREAMING_SNAKE_CASE ) == "->".join(str(_SCREAMING_SNAKE_CASE ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
__a : Dict = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(_SCREAMING_SNAKE_CASE ) == "->".join(str(_SCREAMING_SNAKE_CASE ) for i in range(-8 , 1 ) )
def lowerCamelCase ():
__a : Tuple = [
-9,
100,
Node(77_345_112 ),
'dlrow olleH',
7,
5_555,
0,
-1_9_2.5_5_5_5_5,
'Hello, world!',
7_7.9,
Node(10 ),
None,
None,
1_2.2_0,
]
__a : Any = LinkedList()
for i in test_input:
linked_list.insert_tail(_SCREAMING_SNAKE_CASE )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_SCREAMING_SNAKE_CASE ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
__a : Union[str, Any] = linked_list.delete_head()
assert result == -9
assert (
str(_SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
__a : Dict = linked_list.delete_tail()
assert result == 1_2.2
assert (
str(_SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
__a : Union[str, Any] = linked_list.delete_nth(10 )
assert result is None
assert (
str(_SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(_SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_SCREAMING_SNAKE_CASE )
assert (
str(_SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_SCREAMING_SNAKE_CASE )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def lowerCamelCase ():
from doctest import testmod
testmod()
__a : List[str] = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(_SCREAMING_SNAKE_CASE )
print('\nReading/changing Node data using indexing:' )
print(F"""Element at Position 1: {linked_list[1]}""" )
__a : Union[str, Any] = input('Enter New Value: ' ).strip()
print('New list:' )
print(_SCREAMING_SNAKE_CASE )
print(F"""length of linked_list is : {len(_SCREAMING_SNAKE_CASE )}""" )
if __name__ == "__main__":
main()
| 27 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : Optional[int] ={}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[str] =['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Tuple =['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__snake_case : Any =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 129 | 0 |
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
a = '''sshleifer/bart-tiny-random'''
a = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self : Optional[Any] ):
return AutoConfig.from_pretrained(_UpperCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_A , *_A = create_student_by_copying_alternating_layers(_UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def lowerCAmelCase_ ( self : Optional[Any] ):
_A , *_A = create_student_by_copying_alternating_layers(_UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
_A , *_A = create_student_by_copying_alternating_layers(_UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=_UpperCAmelCase )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def lowerCAmelCase_ ( self : str ):
_A , *_A = create_student_by_copying_alternating_layers(_UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def lowerCAmelCase_ ( self : Optional[int] ):
with self.assertRaises(_UpperCAmelCase ):
create_student_by_copying_alternating_layers(_UpperCAmelCase , tempfile.mkdtemp() , e=_UpperCAmelCase , d=_UpperCAmelCase )
| 271 |
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
a = HUGGINGFACE_HUB_CACHE
a = '''config.json'''
a = '''diffusion_pytorch_model.bin'''
a = '''diffusion_flax_model.msgpack'''
a = '''model.onnx'''
a = '''diffusion_pytorch_model.safetensors'''
a = '''weights.pb'''
a = '''https://huggingface.co'''
a = default_cache_path
a = '''diffusers_modules'''
a = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules'''))
a = ['''fp16''', '''non-ema''']
a = '''.self_attn'''
| 271 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_UpperCAmelCase = False
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return 1_2
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return 1_2
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return 3_2
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : Dict = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(a__ )
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : Union[str, Any] = 1_2
A_ : Union[str, Any] = 1_2
A_ : List[str] = {
'attention_bias': True,
'cross_attention_dim': 3_2,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 3_2,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
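# The transformer operates on discrete VQ token indices laid out on a (height x width) latent grid.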
A_ : Tuple = TransformeraDModel(**a__ )
return model
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = 'cpu'
A_ : Union[str, Any] = self.dummy_vqvae
A_ : List[Any] = self.dummy_text_encoder
A_ : List[Any] = self.dummy_tokenizer
A_ : Dict = self.dummy_transformer
A_ : Union[str, Any] = VQDiffusionScheduler(self.num_embed )
A_ : Union[str, Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=a__ )
A_ : str = VQDiffusionPipeline(
vqvae=a__ , text_encoder=a__ , tokenizer=a__ , transformer=a__ , scheduler=a__ , learned_classifier_free_sampling_embeddings=a__ , )
A_ : Tuple = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
A_ : Tuple = 'teddy bear playing in the pool'
A_ : Tuple = torch.Generator(device=a__ ).manual_seed(0 )
A_ : Tuple = pipe([prompt] , generator=a__ , num_inference_steps=2 , output_type='np' )
A_ : Any = output.images
A_ : Dict = torch.Generator(device=a__ ).manual_seed(0 )
A_ : int = pipe(
[prompt] , generator=a__ , output_type='np' , return_dict=a__ , num_inference_steps=2 )[0]
A_ : Any = image[0, -3:, -3:, -1]
A_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
A_ : Tuple = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = 'cpu'
A_ : Tuple = self.dummy_vqvae
A_ : Union[str, Any] = self.dummy_text_encoder
A_ : Union[str, Any] = self.dummy_tokenizer
A_ : List[str] = self.dummy_transformer
A_ : Optional[Any] = VQDiffusionScheduler(self.num_embed )
A_ : int = LearnedClassifierFreeSamplingEmbeddings(
learnable=a__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
A_ : List[str] = VQDiffusionPipeline(
vqvae=a__ , text_encoder=a__ , tokenizer=a__ , transformer=a__ , scheduler=a__ , learned_classifier_free_sampling_embeddings=a__ , )
A_ : Optional[Any] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
A_ : Optional[Any] = 'teddy bear playing in the pool'
A_ : Any = torch.Generator(device=a__ ).manual_seed(0 )
A_ : Optional[int] = pipe([prompt] , generator=a__ , num_inference_steps=2 , output_type='np' )
A_ : Optional[Any] = output.images
A_ : Optional[Any] = torch.Generator(device=a__ ).manual_seed(0 )
A_ : Dict = pipe(
[prompt] , generator=a__ , output_type='np' , return_dict=a__ , num_inference_steps=2 )[0]
A_ : List[Any] = image[0, -3:, -3:, -1]
A_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
A_ : str = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
A_ : int = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
A_ : List[Any] = pipeline.to(a__ )
pipeline.set_progress_bar_config(disable=a__ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
A_ : str = torch.Generator(device=a__ ).manual_seed(0 )
A_ : List[Any] = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=a__ , output_type='np' , )
A_ : Dict = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 140 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE : Tuple = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["ConditionalDetrFeatureExtractor"]
_SCREAMING_SNAKE_CASE : List[Any] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Any ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = BlipImageProcessor()
UpperCAmelCase_ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
UpperCAmelCase_ = BlipProcessor(_UpperCAmelCase , _UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Optional[int] , **_UpperCAmelCase : Any ) -> Dict:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).tokenizer
def lowercase__ ( self : Dict , **_UpperCAmelCase : Union[str, Any] ) -> Tuple:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
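# Build a single random RGB image (channels-first uint8) and convert it to PIL for the processor tests.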
UpperCAmelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCAmelCase_ = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
UpperCAmelCase_ = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
UpperCAmelCase_ = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def lowercase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = BlipProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = image_processor(_UpperCAmelCase , return_tensors="np" )
UpperCAmelCase_ = processor(images=_UpperCAmelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = BlipProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCAmelCase_ = "lower newer"
UpperCAmelCase_ = processor(text=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer(_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = BlipProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCAmelCase_ = "lower newer"
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
def lowercase__ ( self : str ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = BlipProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ = processor.batch_decode(_UpperCAmelCase )
UpperCAmelCase_ = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = BlipProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
UpperCAmelCase_ = "lower newer"
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 241 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''image_processor''', '''tokenizer''']
UpperCamelCase = '''ViTImageProcessor'''
UpperCamelCase = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : List[Any] , _UpperCAmelCase : str=None , _UpperCAmelCase : Optional[int]=None , **_UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _UpperCAmelCase , )
UpperCAmelCase_ = kwargs.pop("feature_extractor" )
UpperCAmelCase_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self : Optional[Any] , _UpperCAmelCase : Any=None , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : List[Any]=None , **_UpperCAmelCase : str ) -> Optional[int]:
'''simple docstring'''
if text is None and visual_prompt is None and images is None:
raise ValueError("You have to specify either text, visual prompt or images." )
if text is not None and visual_prompt is not None:
raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
if text is not None:
UpperCAmelCase_ = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if visual_prompt is not None:
UpperCAmelCase_ = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if images is not None:
UpperCAmelCase_ = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if visual_prompt is not None and images is not None:
UpperCAmelCase_ = {
"pixel_values": image_features.pixel_values,
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
UpperCAmelCase_ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
UpperCAmelCase_ = {
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def lowercase__ ( self : List[Any] , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : List[Any] ) -> Any:
'''simple docstring'''
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def lowercase__ ( self : Dict , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _UpperCAmelCase , )
return self.image_processor_class
@property
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _UpperCAmelCase , )
return self.image_processor
| 241 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Optional[int] = ["""image_processor""", """tokenizer"""]
lowerCamelCase_ : Tuple = """ViTImageProcessor"""
lowerCamelCase_ : str = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> int:
lowerCamelCase : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , UpperCamelCase__ , )
lowerCamelCase : Any = kwargs.pop("feature_extractor" )
lowerCamelCase : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Union[str, Any]:
if text is None and visual_prompt is None and images is None:
raise ValueError("You have to specify either text, visual prompt or images." )
if text is not None and visual_prompt is not None:
raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
if text is not None:
lowerCamelCase : Any = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if visual_prompt is not None:
lowerCamelCase : List[Any] = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if images is not None:
lowerCamelCase : Tuple = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if visual_prompt is not None and images is not None:
lowerCamelCase : Optional[Any] = {
"pixel_values": image_features.pixel_values,
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
lowerCamelCase : int = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
lowerCamelCase : Tuple = {
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def _lowercase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def _lowercase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def _lowercase ( self ) -> Dict:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCamelCase__ , )
return self.image_processor_class
@property
def _lowercase ( self ) -> str:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCamelCase__ , )
return self.image_processor
| 48 |
'''simple docstring'''
from ....utils import logging
a__ : Optional[Any] = logging.get_logger(__name__)
class lowercase_ ( a__ ):
def __init__( self , a , a=None , a=20_48 ):
UpperCamelCase__ = config.__dict__
UpperCamelCase__ = modal_hidden_size
if num_labels:
UpperCamelCase__ = num_labels
| 80 | 0 |
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =ComputeEnvironment.AMAZON_SAGEMAKER
lowerCamelCase__ =True
lowerCamelCase__ ='ml.p3.2xlarge'
lowerCamelCase__ ='accelerate_sagemaker_execution_role'
lowerCamelCase__ ='hf-sm'
lowerCamelCase__ ='us-east-1'
lowerCamelCase__ =1
lowerCamelCase__ ='accelerate-sagemaker-1'
lowerCamelCase__ ='1.6'
lowerCamelCase__ ='4.4'
lowerCamelCase__ ='train.py'
lowerCamelCase__ =[
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
lowerCamelCase__ =[
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args['''model_name_or_path'''] , a_ )
assert isinstance(converted_args['''do_train'''] , a_ )
assert isinstance(converted_args['''epochs'''] , a_ )
assert isinstance(converted_args['''learning_rate'''] , a_ )
assert isinstance(converted_args['''max_steps'''] , a_ )
with pytest.raises(a_ ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 24 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : str = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
SCREAMING_SNAKE_CASE : int = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def lowercase ( _snake_case : Optional[int] ) ->int:
"""simple docstring"""
__snake_case : int = {}
with open(_snake_case , '''r''' ) as file:
for line_number, line in enumerate(_snake_case ):
__snake_case : Union[str, Any] = line.strip()
if line:
__snake_case : str = line.split()
__snake_case : Union[str, Any] = line_number
__snake_case : Dict = words[0]
__snake_case : str = value
return result
def lowercase ( _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Tuple , _snake_case : Any , _snake_case : List[str] ) ->List[str]:
"""simple docstring"""
for attribute in key.split('''.''' ):
__snake_case : Dict = getattr(_snake_case , _snake_case )
__snake_case : Any = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_snake_case ):
__snake_case : int = PARAM_MAPPING[full_name.split('''.''' )[-1]]
__snake_case : str = '''param'''
if weight_type is not None and weight_type != "param":
__snake_case : Union[str, Any] = getattr(_snake_case , _snake_case ).shape
elif weight_type is not None and weight_type == "param":
__snake_case : Optional[Any] = hf_pointer
for attribute in hf_param_name.split('''.''' ):
__snake_case : Dict = getattr(_snake_case , _snake_case )
__snake_case : List[str] = shape_pointer.shape
# let's reduce dimension
__snake_case : int = value[0]
else:
__snake_case : int = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__snake_case : List[Any] = value
elif weight_type == "weight_g":
__snake_case : Tuple = value
elif weight_type == "weight_v":
__snake_case : str = value
elif weight_type == "bias":
__snake_case : str = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
__snake_case : List[Any] = getattr(_snake_case , _snake_case )
__snake_case : int = value
else:
__snake_case : List[Any] = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowercase ( _snake_case : Any , _snake_case : List[Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : int ) ->int:
"""simple docstring"""
__snake_case : Optional[Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_snake_case ):
__snake_case : Dict = PARAM_MAPPING[full_name.split('''.''' )[-1]]
__snake_case : List[str] = '''param'''
if weight_type is not None and weight_type != "param":
__snake_case : str = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__snake_case : Tuple = '''.'''.join([key, hf_param_name] )
else:
__snake_case : Optional[int] = key
__snake_case : List[Any] = value if '''lm_head''' in full_key else value[0]
SCREAMING_SNAKE_CASE : Tuple = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
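# Adapter parameters: fairseq names (W_a, b_a, ln_W, ...) map onto the linear/norm
# submodule attributes of the HF adapter layers.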
def lowercase ( _snake_case : str , _snake_case : List[Any] , _snake_case : Tuple=None , _snake_case : int=None ) ->Dict:
"""simple docstring"""
__snake_case : Tuple = False
for key, mapped_key in MAPPING.items():
__snake_case : int = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__snake_case : int = True
if "*" in mapped_key:
__snake_case : List[Any] = name.split(_snake_case )[0].split('''.''' )[-2]
__snake_case : Tuple = mapped_key.replace('''*''' , _snake_case )
if "weight_g" in name:
__snake_case : Union[str, Any] = '''weight_g'''
elif "weight_v" in name:
__snake_case : List[str] = '''weight_v'''
elif "bias" in name:
__snake_case : Any = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__snake_case : List[Any] = '''weight'''
else:
__snake_case : Union[str, Any] = None
if hf_dict is not None:
rename_dict(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
else:
set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
return is_used
def lowercase ( _snake_case : str , _snake_case : Dict , _snake_case : List[str] ) ->Any:
"""simple docstring"""
__snake_case : Union[str, Any] = []
__snake_case : Union[str, Any] = fairseq_model.state_dict()
__snake_case : str = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__snake_case : str = False
if "conv_layers" in name:
load_conv_layer(
_snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , )
__snake_case : Union[str, Any] = True
else:
__snake_case : Optional[Any] = load_wavaveca_layer(_snake_case , _snake_case , _snake_case )
if not is_used:
unused_weights.append(_snake_case )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowercase ( _snake_case : Any , _snake_case : str , _snake_case : Any , _snake_case : Tuple , _snake_case : List[str] ) ->Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = full_name.split('''conv_layers.''' )[-1]
__snake_case : str = name.split('''.''' )
__snake_case : Optional[int] = int(items[0] )
__snake_case : Any = int(items[1] )
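# type_id 0 entries hold conv weights/biases; type_id 2 entries hold layer-norm
# parameters (only the first layer when group norm is used).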
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__snake_case : int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__snake_case : Any = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__snake_case : Any = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__snake_case : List[str] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_snake_case )
@torch.no_grad()
def lowercase ( _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Any=None , _snake_case : str=None , _snake_case : List[Any]=True , _snake_case : int=False ) ->Dict:
"""simple docstring"""
if config_path is not None:
__snake_case : Optional[Any] = WavaVecaConfig.from_pretrained(_snake_case )
else:
__snake_case : Tuple = WavaVecaConfig()
if is_seq_class:
__snake_case : Optional[int] = read_txt_into_dict(_snake_case )
__snake_case : List[Any] = idalabel
__snake_case : int = WavaVecaForSequenceClassification(_snake_case )
__snake_case : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , )
feature_extractor.save_pretrained(_snake_case )
elif is_finetuned:
if dict_path:
__snake_case : int = Dictionary.load(_snake_case )
# important: change the bos & pad token ids since the CTC blank symbol is <pad>
# and not <s> as in fairseq
__snake_case : Tuple = target_dict.pad_index
__snake_case : int = target_dict.bos_index
__snake_case : Tuple = target_dict.eos_index
__snake_case : Optional[Any] = len(target_dict.symbols )
__snake_case : Any = os.path.join(_snake_case , '''vocab.json''' )
if not os.path.isdir(_snake_case ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) )
return
os.makedirs(_snake_case , exist_ok=_snake_case )
__snake_case : Optional[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
__snake_case : Dict = 0
__snake_case : List[Any] = 1
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(_snake_case , _snake_case )
__snake_case : List[Any] = WavaVecaCTCTokenizer(
_snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_snake_case , )
__snake_case : Tuple = True if config.feat_extract_norm == '''layer''' else False
__snake_case : str = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , )
__snake_case : Tuple = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case )
processor.save_pretrained(_snake_case )
__snake_case : Optional[int] = WavaVecaForCTC(_snake_case )
else:
__snake_case : Tuple = WavaVecaForPreTraining(_snake_case )
if is_finetuned or is_seq_class:
__snake_case , __snake_case , __snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
__snake_case : Dict = argparse.Namespace(task='''audio_pretraining''' )
__snake_case : Optional[int] = fairseq.tasks.setup_task(_snake_case )
__snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_snake_case )
__snake_case : int = model[0].eval()
recursively_load_weights(_snake_case , _snake_case , not is_finetuned )
hf_wavavec.save_pretrained(_snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
SCREAMING_SNAKE_CASE : Any = parser.parse_args()
SCREAMING_SNAKE_CASE : Tuple = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 24 | 1 |
"""simple docstring"""
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
return [sentence[i : i + ngram_size] for i in range(len(UpperCamelCase__ ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 221 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCamelCase = logging.get_logger(__name__)
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
A__ = b.T
A__ = np.sum(np.square(UpperCamelCase__ ) , axis=1 )
A__ = np.sum(np.square(UpperCamelCase__ ) , axis=0 )
A__ = np.matmul(UpperCamelCase__ , UpperCamelCase__ )
A__ = aa[:, None] - 2 * ab + ba[None, :]
return d
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
A__ = x.reshape(-1 , 3 )
A__ = squared_euclidean_distance(UpperCamelCase__ , UpperCamelCase__ )
return np.argmin(UpperCamelCase__ , axis=1 )
class UpperCamelCase__( __A ):
lowerCAmelCase__ : Tuple = ['pixel_values']
def __init__( self ,__UpperCAmelCase = None ,__UpperCAmelCase = True ,__UpperCAmelCase = None ,__UpperCAmelCase = PILImageResampling.BILINEAR ,__UpperCAmelCase = True ,__UpperCAmelCase = True ,**__UpperCAmelCase ,) -> None:
super().__init__(**__UpperCAmelCase )
A__ = size if size is not None else {'height': 2_56, 'width': 2_56}
A__ = get_size_dict(__UpperCAmelCase )
A__ = np.array(__UpperCAmelCase ) if clusters is not None else None
A__ = do_resize
A__ = size
A__ = resample
A__ = do_normalize
A__ = do_color_quantize
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = PILImageResampling.BILINEAR ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> np.ndarray:
A__ = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
__UpperCAmelCase ,size=(size['height'], size['width']) ,resample=__UpperCAmelCase ,data_format=__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,) -> np.ndarray:
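# Map uint8 pixels from [0, 255] to [-1, 1]: divide by 127.5, then subtract 1.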
A__ = rescale(image=__UpperCAmelCase ,scale=1 / 1_2_7.5 ,data_format=__UpperCAmelCase )
A__ = image - 1
return image
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = ChannelDimension.FIRST ,**__UpperCAmelCase ,) -> PIL.Image.Image:
A__ = do_resize if do_resize is not None else self.do_resize
A__ = size if size is not None else self.size
A__ = get_size_dict(__UpperCAmelCase )
A__ = resample if resample is not None else self.resample
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
A__ = clusters if clusters is not None else self.clusters
A__ = np.array(__UpperCAmelCase )
A__ = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
A__ = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
A__ = [self.resize(image=__UpperCAmelCase ,size=__UpperCAmelCase ,resample=__UpperCAmelCase ) for image in images]
if do_normalize:
A__ = [self.normalize(image=__UpperCAmelCase ) for image in images]
if do_color_quantize:
A__ = [to_channel_dimension_format(__UpperCAmelCase ,ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
A__ = np.array(__UpperCAmelCase )
A__ = color_quantize(__UpperCAmelCase ,__UpperCAmelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
A__ = images.shape[0]
A__ = images.reshape(__UpperCAmelCase ,-1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
A__ = list(__UpperCAmelCase )
else:
A__ = [to_channel_dimension_format(__UpperCAmelCase ,__UpperCAmelCase ) for image in images]
A__ = {'input_ids': images}
return BatchFeature(data=__UpperCAmelCase ,tensor_type=__UpperCAmelCase )
| 221 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
SCREAMING_SNAKE_CASE__ = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 183 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
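# LayoutLM-style models expect word bounding boxes normalized to a 0-1000 grid,
# independent of the underlying image resolution.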
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> int:
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None )-> Dict:
UpperCamelCase = tesseract_config if tesseract_config is not None else """"""
# apply OCR
UpperCamelCase = to_pil_image(__UpperCamelCase )
UpperCamelCase ,UpperCamelCase = pil_image.size
UpperCamelCase = pytesseract.image_to_data(__UpperCamelCase , lang=__UpperCamelCase , output_type="""dict""" , config=__UpperCamelCase )
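# pytesseract returns parallel lists per detected token; we keep the text plus the
# box origin (left, top) and box size (width, height) columns.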
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
UpperCamelCase = [idx for idx, word in enumerate(__UpperCamelCase ) if not word.strip()]
UpperCamelCase = [word for idx, word in enumerate(__UpperCamelCase ) if idx not in irrelevant_indices]
UpperCamelCase = [coord for idx, coord in enumerate(__UpperCamelCase ) if idx not in irrelevant_indices]
UpperCamelCase = [coord for idx, coord in enumerate(__UpperCamelCase ) if idx not in irrelevant_indices]
UpperCamelCase = [coord for idx, coord in enumerate(__UpperCamelCase ) if idx not in irrelevant_indices]
UpperCamelCase = [coord for idx, coord in enumerate(__UpperCamelCase ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
UpperCamelCase = []
for x, y, w, h in zip(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
UpperCamelCase = [x, y, x + w, y + h]
actual_boxes.append(__UpperCamelCase )
# finally, normalize the bounding boxes
UpperCamelCase = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) )
assert len(__UpperCamelCase ) == len(__UpperCamelCase ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
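# Hypothetical usage sketch for the processor reconstructed above. It assumes the standard
# transformers convention that calling the processor dispatches to `preprocess`, and that
# Pillow and pytesseract are installed; "document.png" is a placeholder path.
#
#   from PIL import Image
#   processor = LayoutLMv2ImageProcessor(apply_ocr=True)
#   encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="np")
#   encoding["pixel_values"].shape        # (1, 3, 224, 224), channels-first, BGR order
#   encoding["words"], encoding["boxes"]  # OCR'd words and their 0-1000 normalized boxes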
| 183 | 1 |
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
UpperCAmelCase : int = logging.getLogger(__name__)
def accuracy(out, labels):
    """Count how many argmax predictions match the labels."""
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
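# Illustrative check: argmax over each row of logits picks class 1 twice, matching the
# labels, so the function returns the number of correct predictions:
#   accuracy(np.array([[0.1, 0.9], [0.2, 0.8]]), np.array([1, 1])) == 2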
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples (story, 1st continuation, 2nd continuation, label).

    Produces Transformer inputs of shape (n_batch, n_alternative, length) where, for each batch and continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
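# Shape sketch with hypothetical toy sizes: for a split of 4 examples and input_len=40,
# pre_process_datasets yields tensors of shapes
#   input_ids    (4, 2, 40)  one row per continuation choice
#   mc_token_ids (4, 2)      position of the final [clf] token in each row
#   lm_labels    (4, 2, 40)  same tokens, with -100 padding ignored by the LM loss
#   mc_labels    (4,)        index of the correct continuation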
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    # Load tokenizer and model
    # This loading function also adds new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)
    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)
        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
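# Hypothetical invocation, assuming this file is saved as run_openai_gpt.py; the RocStories
# CSV paths below are placeholders:
#   python run_openai_gpt.py --model_name openai-gpt --do_train --do_eval \
#       --train_dataset train.csv --eval_dataset val.csv \
#       --output_dir out/ --train_batch_size 16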
| 267 |
def neville_interpolate(x_points: list, y_points: list, x0: float) -> list:
    """
    Interpolate and evaluate the polynomial through (x_points, y_points) at x0
    using Neville's iterated-interpolation scheme. Returns the interpolated
    value together with the full table of intermediate computations.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
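# Worked example: interpolating y = x**2 through (1, 1), (2, 4), (3, 9), (4, 16) at
# x0 = 2.5 recovers the exact value, since four points pin down the quadratic exactly:
#   neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 2.5)[0] == 6.25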
if __name__ == "__main__":
import doctest
doctest.testmod()
| 306 | 0 |
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
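# Worked example (mass action law: electron_conc * hole_conc == intrinsic_conc ** 2).
# Passing 0 for the unknown quantity returns its name and value:
#   carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
#   == ('intrinsic_conc', 50.0)  # since (25 * 100) ** 0.5 == 50.0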
if __name__ == "__main__":
import doctest
doctest.testmod()
| 111 |
'''simple docstring'''
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    graph = {i: [] for i in range(vertices_number)}
    # if probability is greater than or equal to 1, generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower than or equal to 0, return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from i to j
    # if the randomly generated number is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add the edge from j to i
                    graph[j].append(i)
    return graph
def complete_graph(vertices_number: int) -> dict:
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
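# Quick examples: complete_graph(3) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}, while
# random_graph(4, 0.5) returns an undirected adjacency dict whose edges vary per run
# (call random.seed(...) first for reproducibility); with directed=True only the
# i -> j direction is kept.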
if __name__ == "__main__":
import doctest
doctest.testmod()
| 111 | 1 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
def a__ ( self ) -> Tuple:
_A : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : Optional[Any] = None
if self.use_labels:
_A : Tuple = ids_tensor([self.batch_size] , self.num_labels )
_A : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> List[str]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def a__ ( self , _a , _a , _a ) -> str:
_A : List[str] = TFRegNetModel(config=_a )
_A : Optional[Any] = model(_a , training=_a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self , _a , _a , _a ) -> Dict:
_A : List[str] = self.num_labels
_A : str = TFRegNetForImageClassification(_a )
_A : List[Any] = model(_a , labels=_a , training=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self ) -> Optional[Any]:
_A : int = self.prepare_config_and_inputs()
_A , _A , _A : List[str] = config_and_inputs
_A : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self) -> Optional[Any]:
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
def a__ ( self ) -> Union[str, Any]:
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def a__ ( self ) -> Optional[int]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def a__ ( self ) -> List[Any]:
super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def a__ ( self ) -> Any:
pass
def a__ ( self ) -> str:
_A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Dict = model_class(_a )
_A : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : Optional[Any] = [*signature.parameters.keys()]
_A : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> Dict:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Dict:
def check_hidden_states_output(_a , _a , _a ):
_A : int = model_class(_a )
_A : Optional[Any] = model(**self._prepare_for_class(_a , _a ) , training=_a )
_A : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
_A , _A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_A : Dict = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
_A : Tuple = layer_type
_A : str = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Any = True
check_hidden_states_output(_a , _a , _a )
def a__ ( self ) -> Dict:
_A , _A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(_a , _a , _a , _a={} ):
_A : Dict = model(_a , return_dict=_a , **_a )
_A : int = model(_a , return_dict=_a , **_a ).to_tuple()
def recursive_check(_a , _a ):
if isinstance(_a , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_a , _a ):
recursive_check(_a , _a )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(_a , _a ) ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(_a , _a )
for model_class in self.all_model_classes:
_A : Dict = model_class(_a )
_A : str = self._prepare_for_class(_a , _a )
_A : Dict = self._prepare_for_class(_a , _a )
check_equivalence(_a , _a , _a )
_A : Optional[Any] = self._prepare_for_class(_a , _a , return_labels=_a )
_A : List[str] = self._prepare_for_class(_a , _a , return_labels=_a )
check_equivalence(_a , _a , _a )
_A : Optional[Any] = self._prepare_for_class(_a , _a )
_A : List[Any] = self._prepare_for_class(_a , _a )
check_equivalence(_a , _a , _a , {"""output_hidden_states""": True} )
_A : int = self._prepare_for_class(_a , _a , return_labels=_a )
_A : List[Any] = self._prepare_for_class(_a , _a , return_labels=_a )
check_equivalence(_a , _a , _a , {"""output_hidden_states""": True} )
def a__ ( self ) -> List[Any]:
_A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> Optional[int]:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : List[Any] = TFRegNetModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
@cached_property
def a__ ( self ) -> str:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def a__ ( self ) -> Dict:
_A : Optional[int] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_A : str = self.default_image_processor
_A : Dict = prepare_img()
_A : int = image_processor(images=_a , return_tensors="""tf""" )
# forward pass
_A : Any = model(**_a , training=_a )
# verify the logits
_A : str = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : str = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , _a , atol=1e-4 )
| 26 | """simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
__lowerCamelCase = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
__lowerCamelCase = "main"
# Default branch name
__lowerCamelCase = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
__lowerCamelCase = "aaaaaaa"
# This commit does not exist, so we should 404.
__lowerCamelCase = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
__lowerCamelCase = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def UpperCAmelCase ( ):
"""simple docstring"""
print('Welcome!' )
yield
print('Bye!' )
@contextlib.contextmanager
def UpperCAmelCase ( ):
"""simple docstring"""
print('Bonjour!' )
yield
print('Au revoir!' )
class UpperCamelCase__( unittest.TestCase ):
def snake_case__ ( self ) -> List[str]:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec('transformers' ) is not None
class UpperCamelCase__( unittest.TestCase ):
@unittest.mock.patch('sys.stdout' ,new_callable=io.StringIO )
def snake_case__ ( self ,__UpperCAmelCase ) -> Dict:
with ContextManagers([] ):
print('Transformers are awesome!' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() ,'Transformers are awesome!\n' )
@unittest.mock.patch('sys.stdout' ,new_callable=io.StringIO )
def snake_case__ ( self ,__UpperCAmelCase ) -> List[str]:
with ContextManagers([context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() ,'Welcome!\nTransformers are awesome!\nBye!\n' )
@unittest.mock.patch('sys.stdout' ,new_callable=io.StringIO )
def snake_case__ ( self ,__UpperCAmelCase ) -> Any:
with ContextManagers([context_fr(), context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() ,'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n' )
@require_torch
def snake_case__ ( self ) -> Union[str, Any]:
self.assertEqual(find_labels(__UpperCAmelCase ) ,['labels'] )
self.assertEqual(find_labels(__UpperCAmelCase ) ,['labels', 'next_sentence_label'] )
self.assertEqual(find_labels(__UpperCAmelCase ) ,['start_positions', 'end_positions'] )
class UpperCamelCase__( __A ):
pass
self.assertEqual(find_labels(__UpperCAmelCase ) ,['labels'] )
@require_tf
def snake_case__ ( self ) -> str:
self.assertEqual(find_labels(__UpperCAmelCase ) ,['labels'] )
self.assertEqual(find_labels(__UpperCAmelCase ) ,['labels', 'next_sentence_label'] )
self.assertEqual(find_labels(__UpperCAmelCase ) ,['start_positions', 'end_positions'] )
class UpperCamelCase__( __A ):
pass
self.assertEqual(find_labels(__UpperCAmelCase ) ,['labels'] )
@require_flax
def snake_case__ ( self ) -> List[Any]:
# Flax models don't have labels
self.assertEqual(find_labels(__UpperCAmelCase ) ,[] )
self.assertEqual(find_labels(__UpperCAmelCase ) ,[] )
self.assertEqual(find_labels(__UpperCAmelCase ) ,[] )
class UpperCamelCase__( __A ):
pass
self.assertEqual(find_labels(__UpperCAmelCase ) ,[] )
| 221 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,attention_head_dim=(2, 4) ,use_linear_projection=lowerCamelCase__ ,)
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase__ ,set_alpha_to_one=lowerCamelCase__ ,)
SCREAMING_SNAKE_CASE = DDIMInverseScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase__ ,set_alpha_to_zero=lowerCamelCase__ ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=128 ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act="""gelu""" ,projection_dim=512 ,)
SCREAMING_SNAKE_CASE = CLIPTextModel(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Any=0 ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = floats_tensor((1, 16, 16) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = floats_tensor((1, 2, 4, 16, 16) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
if str(lowerCamelCase__ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCamelCase__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : Any ,lowerCamelCase__ : Dict=0 ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = image.cpu().permute(0 ,2 ,3 ,1 )[0]
        SCREAMING_SNAKE_CASE = Image.fromarray(np.uint8(lowerCamelCase__ ) ).convert("""RGB""" )
if str(lowerCamelCase__ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCamelCase__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : List[Any]=0 ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = image.cpu().permute(0 ,2 ,3 ,1 )[0]
        SCREAMING_SNAKE_CASE = Image.fromarray(np.uint8(lowerCamelCase__ ) ).convert("""RGB""" )
if str(lowerCamelCase__ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCamelCase__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
if not hasattr(self.pipeline_class ,"""_optional_components""" ):
return
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = pipe(**lowerCamelCase__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.pipeline_class.from_pretrained(lowerCamelCase__ )
pipe_loaded.to(lowerCamelCase__ )
pipe_loaded.set_progress_bar_config(disable=lowerCamelCase__ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCamelCase__ ,lowerCamelCase__ ) is None ,F"""`{optional_component}` did not stay set to None after loading.""" ,)
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = pipe_loaded(**lowerCamelCase__ )[0]
SCREAMING_SNAKE_CASE = np.abs(output - output_loaded ).max()
self.assertLess(lowerCamelCase__ ,1e-4 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """cpu"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_dummy_mask_inputs(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = pipe.generate_mask(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = mask[0, -3:, -3:]
self.assertEqual(mask.shape ,(1, 16, 16) )
SCREAMING_SNAKE_CASE = np.array([0] * 9 )
SCREAMING_SNAKE_CASE = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase__ ,1e-3 )
self.assertEqual(mask[0, -3, -4] ,0 )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """cpu"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inversion_inputs(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = pipe.invert(**lowerCamelCase__ ).images
SCREAMING_SNAKE_CASE = image[0, -1, -3:, -3:]
self.assertEqual(image.shape ,(2, 32, 32, 3) )
SCREAMING_SNAKE_CASE = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] ,)
SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase__ ,1e-3 )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """cpu"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = {"""beta_start""": 0.00085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = DPMSolverMultistepInverseScheduler(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.pipeline_class(**lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inversion_inputs(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = pipe.invert(**lowerCamelCase__ ).images
SCREAMING_SNAKE_CASE = image[0, -1, -3:, -3:]
self.assertEqual(image.shape ,(2, 32, 32, 3) )
SCREAMING_SNAKE_CASE = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] ,)
SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase__ ,1e-3 )
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
SCREAMING_SNAKE_CASE = raw_image.convert("""RGB""" ).resize((768, 768) )
SCREAMING_SNAKE_CASE = raw_image
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" ,safety_checker=lowerCamelCase__ ,torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE = DDIMScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = """a bowl of fruit"""
SCREAMING_SNAKE_CASE = """a bowl of pears"""
SCREAMING_SNAKE_CASE = pipe.generate_mask(
image=self.raw_image ,source_prompt=lowerCamelCase__ ,target_prompt=lowerCamelCase__ ,generator=lowerCamelCase__ ,)
SCREAMING_SNAKE_CASE = pipe.invert(
prompt=lowerCamelCase__ ,image=self.raw_image ,inpaint_strength=0.7 ,generator=lowerCamelCase__ ).latents
SCREAMING_SNAKE_CASE = pipe(
prompt=lowerCamelCase__ ,mask_image=lowerCamelCase__ ,image_latents=lowerCamelCase__ ,generator=lowerCamelCase__ ,negative_prompt=lowerCamelCase__ ,inpaint_strength=0.7 ,output_type="""numpy""" ,).images[0]
SCREAMING_SNAKE_CASE = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" ,safety_checker=lowerCamelCase__ ,torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = """a bowl of fruit"""
SCREAMING_SNAKE_CASE = """a bowl of pears"""
SCREAMING_SNAKE_CASE = pipe.generate_mask(
image=self.raw_image ,source_prompt=lowerCamelCase__ ,target_prompt=lowerCamelCase__ ,generator=lowerCamelCase__ ,)
SCREAMING_SNAKE_CASE = pipe.invert(
prompt=lowerCamelCase__ ,image=self.raw_image ,inpaint_strength=0.7 ,generator=lowerCamelCase__ ,num_inference_steps=25 ,).latents
SCREAMING_SNAKE_CASE = pipe(
prompt=lowerCamelCase__ ,mask_image=lowerCamelCase__ ,image_latents=lowerCamelCase__ ,generator=lowerCamelCase__ ,negative_prompt=lowerCamelCase__ ,inpaint_strength=0.7 ,num_inference_steps=25 ,output_type="""numpy""" ,).images[0]
SCREAMING_SNAKE_CASE = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 193 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : List[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
SCREAMING_SNAKE_CASE = """unwanted, running"""
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowerCamelCase__ ,["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) ,[9, 6, 7, 12, 10, 11] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
# With lower casing
SCREAMING_SNAKE_CASE = self.get_tokenizer(do_lower_case=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer(do_lower_case=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) ,["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=lowerCamelCase__ ,strip_accents=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""h\u00E9llo"""] )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=lowerCamelCase__ ,strip_accents=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=lowerCamelCase__ ,strip_accents=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=lowerCamelCase__ ,strip_accents=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=lowerCamelCase__ ,never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = BasicTokenizer()
SCREAMING_SNAKE_CASE = """a\n'll !!to?'d of, can't."""
SCREAMING_SNAKE_CASE = ["""a""", """'""", """ll""", """!""", """!""", """to""", """?""", """'""", """d""", """of""", """,""", """can""", """'""", """t""", """."""]
self.assertListEqual(tokenizer.tokenize(lowerCamelCase__ ) ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(lowerCamelCase__ ):
SCREAMING_SNAKE_CASE = i
SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=lowerCamelCase__ ,unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) ,[] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) ,["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) ,["""[UNK]""", """runn""", """##ing"""] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCamelCase__ ) for t in ["""Test""", """\xad""", """test"""]] ,[["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCamelCase__ ) for t in ["""Test""", """\xad""", """test"""]] ,[["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained("""bert-base-uncased""" )
        text = tokenizer.encode("""sequence builders""" ,add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text ,text_a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus(
lowerCamelCase__ ,return_attention_mask=lowerCamelCase__ ,return_token_type_ids=lowerCamelCase__ ,return_offsets_mapping=lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,)
SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(lowerCamelCase__ ,"""do_lower_case""" ) else False
SCREAMING_SNAKE_CASE = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens["""offset_mapping"""] )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ["""的""", """人""", """有"""]
SCREAMING_SNAKE_CASE = """""".join(lowerCamelCase__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_p.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_r.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(lowerCamelCase__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_r.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_p.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(lowerCamelCase__ )
# it is expected that only the first Chinese character is not preceded by "##".
SCREAMING_SNAKE_CASE = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCamelCase__ )
]
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
| 193 | 1 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
_DESCRIPTION = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
_KWARGS_DESCRIPTION = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
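# A minimal usage sketch (added; assumes the `mauve` package and its default
# GPT-2 featurizer are installed, mirroring the doctest above):
#
#   import datasets
#   mauve = datasets.load_metric('mauve')
#   out = mauve.compute(predictions=['hello there'], references=['hello there'])
#   print(out.mauve)  # -> 1.0 for identical texts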
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, homepage="""https://github.com/krishnap25/mauve""", inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"""predictions""": datasets.Value("""string""", id="""sequence""" ),
"""references""": datasets.Value("""string""", id="""sequence""" ),
} ), codebase_urls=["""https://github.com/krishnap25/mauve"""], reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
], )
    def _compute( self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1_024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25, ):
        '''simple docstring'''
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed, )
return out
| 64 |
edges = {'''a''': ['''c''', '''b'''], '''b''': ['''d''', '''e'''], '''c''': [], '''d''': [], '''e''': []}
vertices = ['''a''', '''b''', '''c''', '''d''', '''e''']
def topological_sort ( start , visited , sort ):
    current = start
    # add current to visited
    visited.append(current )
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor , visited , sort )
    # if all neighbors visited add current to sort
    sort.append(current )
    # if all vertices haven't been visited select a new one to visit
    if len(visited ) != len(vertices ):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice , visited , sort )
    # return sort
    return sort
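# Note (added): for the sample graph above, topological_sort('a', [], []) returns
# ['c', 'd', 'e', 'b', 'a'] -- every vertex appears after all of its out-neighbors,
# i.e. a reverse topological order produced by the post-order appends.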
if __name__ == "__main__":
    sort = topological_sort('''a''', [], [])
    print(sort) | 233 | 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout( ):
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request('GET' , 'https://huggingface.co' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error( ):
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET' , 'https://huggingface.co' )
def test_offline_with_datasets_offline_mode_enabled( ):
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head('https://huggingface.co' )
| 367 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D( nn.Module ):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        '''simple docstring'''
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
        '''simple docstring'''
        output_states = ()
        for resnet, attn in zip(self.resnets , self.attentions ):
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlock2D( nn.Module ):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        '''simple docstring'''
        resnets = []
        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , temb , deterministic=True ):
        '''simple docstring'''
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D( nn.Module ):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        '''simple docstring'''
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , res_hidden_states_tuple , temb , encoder_hidden_states , deterministic=True ):
        '''simple docstring'''
        for resnet, attn in zip(self.resnets , self.attentions ):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states )
        return hidden_states
class FlaxUpBlock2D( nn.Module ):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        '''simple docstring'''
        resnets = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , res_hidden_states_tuple , temb , deterministic=True ):
        '''simple docstring'''
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states )
        return hidden_states
class FlaxUNetMidBlock2DCrossAttn( nn.Module ):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        '''simple docstring'''
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
        ]
        attentions = []
        for _ in range(self.num_layers ):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        self.attentions = attentions
    def __call__( self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
        '''simple docstring'''
        hidden_states = self.resnets[0](hidden_states , temb )
        for attn, resnet in zip(self.attentions , self.resnets[1:] ):
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        return hidden_states
| 308 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {'tokenization_herbert': ['HerbertTokenizer']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_herbert_fast'] = ['HerbertTokenizerFast']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 145 | '''simple docstring'''
def hamming_distance( string_a: str, string_b: str ) -> int:
    if len(string_a ) != len(string_b ):
        raise ValueError("String lengths must match!" )
    count = 0
    for char_a, char_b in zip(string_a, string_b ):
        if char_a != char_b:
            count += 1
    return count
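# Illustrative sanity checks (added): "karolin" vs "kathrin" is the classic
# Hamming-distance example and differs in exactly three positions.
assert hamming_distance("karolin", "kathrin") == 3
assert hamming_distance("python", "python") == 0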
if __name__ == "__main__":
import doctest
doctest.testmod() | 145 | 1 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LEDTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp( self ):
'''simple docstring'''
super().setUp()
        vocab =[
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens =dict(zip(vocab , range(len(vocab ) ) ) )
        merges =["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map ={"""unk_token""": """<unk>"""}
        self.vocab_file =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , sequence ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def __magic_name__ ( self ) -> int:
'''simple docstring'''
return LEDTokenizer.from_pretrained('allenai/led-base-16384' )
@cached_property
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
@require_torch
    def test_prepare_batch( self ):
        '''simple docstring'''
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        expected_src_tokens = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , max_length=len(expected_src_tokens ) , padding=True , return_tensors='pt' )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens , result )
@require_torch
    def test_prepare_batch_empty_target_text( self ):
        '''simple docstring'''
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , padding=True , return_tensors='pt' )
            self.assertIn('input_ids' , batch )
            self.assertIn('attention_mask' , batch )
            self.assertNotIn('labels' , batch )
            self.assertNotIn('decoder_attention_mask' , batch )
@require_torch
    def test_tokenizer_as_target_length( self ):
        '''simple docstring'''
        tgt_text = [
            """Summary of the text.""",
            """Another summary.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text , max_length=32 , padding='max_length' , return_tensors='pt' )
            self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
    def test_prepare_batch_not_longer_than_maxlen( self ):
        '''simple docstring'''
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ['I am a small frog' * 1024, 'I am a small frog'] , padding=True , truncation=True , return_tensors='pt' )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
    def test_special_tokens( self ):
        '''simple docstring'''
        src_text = ["""A long paragraph for summarization."""]
        tgt_text = [
            """Summary of the text.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text , return_tensors='pt' )
            targets = tokenizer(text_target=tgt_text , return_tensors='pt' )
            input_ids = inputs["""input_ids"""]
            labels = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
    def test_global_attention_mask( self ):
        '''simple docstring'''
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["""Summary of the text.""", """Another summary."""]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text , padding=False )
            encoded_output["""global_attention_mask"""] = [[0] * len(x ) for x in encoded_output["""input_ids"""]]
            outputs = tokenizer.pad(encoded_output )
            self.assertSequenceEqual(outputs['global_attention_mask'] , expected_global_attention_mask )
    def test_pretokenized_inputs( self ):
'''simple docstring'''
pass
    def test_embeded_special_tokens( self ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = """A, <mask> AllenNLP sentence."""
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
                self.assertEqual(
                    sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
                self.assertSequenceEqual(
                    tokens_r_str , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 351 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args , **kwargs ):
            '''simple docstring'''
            pass
def hashimage( image : Image ):
    """simple docstring"""
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
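# Note (added): md5 is used above purely to fingerprint test images for
# comparison; it is not a security-sensitive use.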
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests( unittest.TestCase ):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        '''simple docstring'''
        depth_estimator = DepthEstimationPipeline(model=model , image_processor=processor )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def run_pipeline_test( self , depth_estimator , examples ):
'''simple docstring'''
        outputs = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , outputs )
import datasets
        dataset = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
        outputs = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
            ] , outputs , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
    def test_small_model_tf( self ):
'''simple docstring'''
pass
@slow
@require_torch
    def test_large_model_pt( self ):
'''simple docstring'''
        model_id = 'Intel/dpt-large'
        depth_estimator = pipeline('depth-estimation' , model=model_id )
        outputs = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
        outputs['depth'] = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
    def test_small_model_pt( self ):
'''simple docstring'''
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
| 308 | 0 |
"""simple docstring"""
from __future__ import annotations
def pigeon_sort (array: list[int] ) -> list[int]:
    if len(array ) == 0:
        return array
    _min , _max = min(array ), max(array )
    # Compute the variables
    holes_range = _max - _min + 1
    holes , holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range ):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
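# Quick illustrative check (added): pigeonhole sort is exact on small integer lists.
assert pigeon_sort([8, 3, 2, 7, 4]) == [2, 3, 4, 7, 8]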
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by comma:\n''')
    unsorted = [int(x) for x in user_input.split(''',''')]
print(pigeon_sort(unsorted))
| 301 |
"""simple docstring"""
from math import pi, sqrt
def gamma (num: float ) -> float:
if num <= 0:
raise ValueError("""math domain error""" )
if num > 171.5:
raise OverflowError("""math range error""" )
    elif num - int(num ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
        return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
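# Worked example (added): gamma(2.5) unwinds the recursion as
# 1.5 * 0.5 * gamma(0.5) = 0.75 * sqrt(pi) ~= 1.3293.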
def test_gamma ():
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input('''Gamma of: '''))
print(F"gamma({num}) = {gamma(num)}")
print('''\nEnter 0 to exit...''')
| 301 | 1 |
from collections import deque
class Process :
"""simple docstring"""
    def __init__( self, process_name: str, arrival_time: int, burst_time: int ) -> None:
        """simple docstring"""
        self.process_name = process_name # process name
        self.arrival_time = arrival_time # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time # remaining burst time
        self.waiting_time = 0 # total time of the process wait in ready queue
        self.turnaround_time = 0 # time from arrival time to completion time
class MLFQ :
"""simple docstring"""
    def __init__( self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int, ) -> None:
        """simple docstring"""
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue : deque[Process] = deque()
    def calculate_sequence_of_finish_queue( self ) -> list[str]:
        """simple docstring"""
        sequence = []
        for i in range(len(self.finish_queue ) ):
            sequence.append(self.finish_queue[i].process_name )
        return sequence
    def calculate_waiting_time( self, queue: list[Process] ) -> list[int]:
        """simple docstring"""
        waiting_times = []
        for i in range(len(queue ) ):
            waiting_times.append(queue[i].waiting_time )
        return waiting_times
    def calculate_turnaround_time( self, queue: list[Process] ) -> list[int]:
        """simple docstring"""
        turnaround_times = []
        for i in range(len(queue ) ):
            turnaround_times.append(queue[i].turnaround_time )
        return turnaround_times
    def calculate_completion_time( self, queue: list[Process] ) -> list[int]:
        """simple docstring"""
        completion_times = []
        for i in range(len(queue ) ):
            completion_times.append(queue[i].stop_time )
        return completion_times
    def calculate_remaining_burst_time_of_processes( self, queue: deque[Process] ) -> list[int]:
        """simple docstring"""
        return [q.burst_time for q in queue]
    def update_waiting_time( self, process: Process ) -> int:
        """simple docstring"""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served( self, ready_queue: deque[Process] ) -> deque[Process]:
        """simple docstring"""
        finished : deque[Process] = deque() # sequence deque of finished process
        while len(ready_queue ) != 0:
            cp = ready_queue.popleft() # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp )
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp )
        self.finish_queue.extend(finished ) # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin( self, ready_queue: deque[Process], time_slice: int ) -> tuple[deque[Process], deque[Process]]:
        """simple docstring"""
        finished : deque[Process] = deque() # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue ) ):
            cp = ready_queue.popleft() # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp )
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp )
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp )
        self.finish_queue.extend(finished ) # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue( self ) -> deque[Process]:
        """simple docstring"""
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1 ):
            finished , self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i] )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue )
        return self.finish_queue
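# Note (added): with the sample configuration below, each process passes through
# two round-robin queues (time slices 17 and 25) and anything still unfinished
# is drained by the final first-come-first-served queue.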
if __name__ == "__main__":
import doctest
    P1 = Process('''P1''', 0, 5_3)
    P2 = Process('''P2''', 0, 1_7)
    P3 = Process('''P3''', 0, 6_8)
    P4 = Process('''P4''', 0, 2_4)
    number_of_queues = 3
    time_slices = [1_7, 2_5]
    queue = deque([P1, P2, P3, P4])
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={'''queue''': deque([P1, P2, P3, P4])})
    P1 = Process('''P1''', 0, 5_3)
    P2 = Process('''P2''', 0, 1_7)
    P3 = Process('''P3''', 0, 6_8)
    P4 = Process('''P4''', 0, 2_4)
    number_of_queues = 3
    time_slices = [1_7, 2_5]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        f"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print sequence of finished processes
    print(
        f"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
    )
| 191 |
from __future__ import annotations
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation( graph : dict , v : str , visited_forward : set , visited_backward : set , cst_fwd : dict , cst_bwd : dict , queue : PriorityQueue , parent : dict , shortest_distance : float | int , ) -> float | int:
    '''simple docstring'''
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt , np.inf )
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt) )
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij( source : str , destination : str , graph_forward : dict , graph_backward : dict ) -> int:
    '''simple docstring'''
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward : PriorityQueue[Any] = PriorityQueue()
    queue_backward : PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _ , v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )
        _ , v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )
        shortest_distance = pass_and_relaxation(
            graph_forward , v_fwd , visited_forward , visited_backward , cst_fwd , cst_bwd , queue_forward , parent_forward , shortest_distance , )
        shortest_distance = pass_and_relaxation(
            graph_backward , v_bwd , visited_backward , visited_forward , cst_bwd , cst_fwd , queue_backward , parent_backward , shortest_distance , )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
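# Illustrative check (added): in the sample graphs, E -> G -> F costs 2 + 1 = 3,
# shorter than E -> B -> C -> D -> F (cost 4).
assert bidirectional_dij('''E''', '''F''', graph_fwd, graph_bwd) == 3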
if __name__ == "__main__":
import doctest
doctest.testmod()
| 191 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
A__ = {"""configuration_beit""": ["""BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BeitConfig""", """BeitOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""BeitFeatureExtractor"""]
A__ = ["""BeitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_beit"""] = [
"""BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BeitForImageClassification""",
"""BeitForMaskedImageModeling""",
"""BeitForSemanticSegmentation""",
"""BeitModel""",
"""BeitPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_beit"""] = [
"""FlaxBeitForImageClassification""",
"""FlaxBeitForMaskedImageModeling""",
"""FlaxBeitModel""",
"""FlaxBeitPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 82 |
import random
def rabin_miller( num ) -> bool:
    """simple docstring"""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5 ):
        a = random.randrange(2 ,num - 1 )
        v = pow(a ,s ,num )
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
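# Note (added): five random bases make this a probabilistic test; a composite
# number passes all rounds with probability at most (1/4) ** 5.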
def is_prime_low_num( num ) -> bool:
    """simple docstring"""
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(snake_case__ )
def generate_large_prime( keysize = 10_24 ) -> int:
    """simple docstring"""
    while True:
        num = random.randrange(2 ** (keysize - 1) ,2 ** (keysize) )
        if is_prime_low_num(num ):
return num
if __name__ == "__main__":
    num = generate_large_prime()
print(('''Prime number:''', num))
print(('''is_prime_low_num:''', is_prime_low_num(num)))
| 306 | 0 |
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet( hor ):
    if hor == 128:
        down_block_types = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
        block_out_channels = (32, 128, 256)
        up_block_types = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
    elif hor == 32:
        down_block_types = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
    model = torch.load(F"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch" )
    state_dict = model.state_dict()
    config = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 14,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 65536,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
    hf_value_function = UNet1DModel(**config )
    print(F"length of state dict: {len(state_dict.keys() )}" )
    print(F"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
    mapping = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , F"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin" )
    with open(F"hub/hopper-medium-v2/unet/hor{hor}/config.json" , """w""" ) as f:
        json.dump(config , f )
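# Note (added): zipping the two key sequences assumes both state dicts
# enumerate parameters in the same order, so each original key can simply be
# renamed to its diffusers counterpart before loading.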
def value_function( ):
    config = {
"""in_channels""": 14,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (32, 64, 128, 256),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 65536,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
    model = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
    state_dict = model
    hf_value_function = UNet1DModel(**config )
    print(F"length of state dict: {len(state_dict.keys() )}" )
    print(F"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
    mapping = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
    with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
        json.dump(config , f )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function() | 369 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest( SchedulerCommonTest ):
'''simple docstring'''
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ):
        """simple docstring"""
        config = {
            """num_train_timesteps""": 1100,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """noise_sampler_seed""": 0,
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        """simple docstring"""
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas( self ):
        """simple docstring"""
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self ):
        """simple docstring"""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type( self ):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_no_noise( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1E-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1E-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
    def test_full_loop_with_v_prediction( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""" )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1E-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1E-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1E-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1E-3
    def test_full_loop_device( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1E-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1E-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
    def test_full_loop_device_karras_sigmas( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config , use_karras_sigmas=True )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2 | 306 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list( shape, scale=1.0, rng=None, name=None ):
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class TvltFeatureExtractionTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , spectrogram_length=2048 , feature_size=128 , num_audio_channels=1 , hop_length=512 , chunk_length=30 , sampling_rate=44100 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
def _flatten(UpperCAmelCase_ : Optional[Any] ):
return list(itertools.chain(*UpperCAmelCase_ ) )
if equal_length:
lowerCAmelCase : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCAmelCase : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = TvltFeatureExtractor
    def setUp( self ):
        self.feat_extract_tester = TvltFeatureExtractionTester(self )
    def test_feat_extract_properties( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feat_extract , 'spectrogram_length' ) )
        self.assertTrue(hasattr(feat_extract , 'feature_size' ) )
        self.assertTrue(hasattr(feat_extract , 'num_audio_channels' ) )
        self.assertTrue(hasattr(feat_extract , 'hop_length' ) )
        self.assertTrue(hasattr(feat_extract , 'chunk_length' ) )
        self.assertTrue(hasattr(feat_extract , 'sampling_rate' ) )
    def test_feat_extract_from_and_save_pretrained( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop('mel_filters' )
        mel_2 = dict_second.pop('mel_filters' )
        self.assertTrue(np.allclose(mel_1 , mel_2 ) )
        self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , 'feat_extract.json' )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop('mel_filters' )
        mel_2 = dict_second.pop('mel_filters' )
        self.assertTrue(np.allclose(mel_1 , mel_2 ) )
        self.assertEqual(dict_first , dict_second )
    def test_call( self ):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=44100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors='np' , sampling_rate=44100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs , return_tensors='np' , sampling_rate=44100 , mask_audio=True ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors='np' , sampling_rate=44100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def lowercase__ ( self : Dict , UpperCAmelCase_ : List[Any] ):
lowerCAmelCase : Optional[int] = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
lowerCAmelCase : List[str] = ds.sort('id' ).select(range(UpperCAmelCase_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def lowercase__ ( self : Tuple ):
lowerCAmelCase : List[Any] = self._load_datasamples(1 )
lowerCAmelCase : Union[str, Any] = TvltFeatureExtractor()
lowerCAmelCase : List[Any] = feature_extractor(UpperCAmelCase_ , return_tensors='pt' ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
lowerCAmelCase : List[Any] = torch.tensor([[-0.30_32, -0.27_08], [-0.44_34, -0.40_07]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , UpperCAmelCase_ , atol=1E-4 ) )
| 138 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A : Optional[Any] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = DPTConfig(embedding_type='hybrid' )
if "large" in checkpoint_url:
lowerCAmelCase : Optional[int] = 1_024
lowerCAmelCase : Tuple = 4_096
lowerCAmelCase : Optional[int] = 24
lowerCAmelCase : Optional[int] = 16
lowerCAmelCase : str = [5, 11, 17, 23]
lowerCAmelCase : Tuple = [256, 512, 1_024, 1_024]
lowerCAmelCase : Optional[int] = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
lowerCAmelCase : Optional[int] = 768
lowerCAmelCase : int = [1, 1, 1, 0.5]
lowerCAmelCase : List[Any] = [256, 512, 768, 768]
lowerCAmelCase : List[Any] = 150
lowerCAmelCase : Optional[Any] = 16
lowerCAmelCase : Union[str, Any] = (1, 384, 384)
lowerCAmelCase : Tuple = False
lowerCAmelCase : List[str] = 'project'
if "ade" in checkpoint_url:
lowerCAmelCase : Tuple = True
lowerCAmelCase : str = 768
lowerCAmelCase : List[str] = [1, 1, 1, 0.5]
lowerCAmelCase : Optional[Any] = 150
lowerCAmelCase : List[str] = 16
lowerCAmelCase : Dict = 'huggingface/label-files'
lowerCAmelCase : Optional[Any] = 'ade20k-id2label.json'
lowerCAmelCase : Tuple = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase, _UpperCAmelCase, repo_type='dataset' ) ), 'r' ) )
lowerCAmelCase : Optional[Any] = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
lowerCAmelCase : int = idalabel
lowerCAmelCase : str = {v: k for k, v in idalabel.items()}
lowerCAmelCase : int = [1, 150, 480, 480]
return config, expected_shape
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> int:
'''simple docstring'''
lowerCAmelCase : List[str] = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase, _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowerCAmelCase : Optional[int] = name.replace('pretrained.model', 'dpt.encoder' )
if "pretrained.model" in name:
lowerCAmelCase : Dict = name.replace('pretrained.model', 'dpt.embeddings' )
if "patch_embed" in name:
lowerCAmelCase : int = name.replace('patch_embed', '' )
if "pos_embed" in name:
lowerCAmelCase : Any = name.replace('pos_embed', 'position_embeddings' )
if "attn.proj" in name:
lowerCAmelCase : str = name.replace('attn.proj', 'attention.output.dense' )
if "proj" in name and "project" not in name:
lowerCAmelCase : Union[str, Any] = name.replace('proj', 'projection' )
if "blocks" in name:
lowerCAmelCase : List[str] = name.replace('blocks', 'layer' )
if "mlp.fc1" in name:
lowerCAmelCase : Optional[Any] = name.replace('mlp.fc1', 'intermediate.dense' )
if "mlp.fc2" in name:
lowerCAmelCase : Any = name.replace('mlp.fc2', 'output.dense' )
if "norm1" in name and "backbone" not in name:
lowerCAmelCase : List[str] = name.replace('norm1', 'layernorm_before' )
if "norm2" in name and "backbone" not in name:
lowerCAmelCase : str = name.replace('norm2', 'layernorm_after' )
if "scratch.output_conv" in name:
lowerCAmelCase : int = name.replace('scratch.output_conv', 'head' )
if "scratch" in name:
lowerCAmelCase : Optional[int] = name.replace('scratch', 'neck' )
if "layer1_rn" in name:
lowerCAmelCase : int = name.replace('layer1_rn', 'convs.0' )
if "layer2_rn" in name:
lowerCAmelCase : Optional[Any] = name.replace('layer2_rn', 'convs.1' )
if "layer3_rn" in name:
lowerCAmelCase : List[str] = name.replace('layer3_rn', 'convs.2' )
if "layer4_rn" in name:
lowerCAmelCase : int = name.replace('layer4_rn', 'convs.3' )
if "refinenet" in name:
lowerCAmelCase : Optional[int] = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowerCAmelCase : Any = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4 )}" )
if "out_conv" in name:
lowerCAmelCase : Dict = name.replace('out_conv', 'projection' )
if "resConfUnit1" in name:
lowerCAmelCase : Optional[int] = name.replace('resConfUnit1', 'residual_layer1' )
if "resConfUnit2" in name:
lowerCAmelCase : List[str] = name.replace('resConfUnit2', 'residual_layer2' )
if "conv1" in name:
lowerCAmelCase : List[Any] = name.replace('conv1', 'convolution1' )
if "conv2" in name:
lowerCAmelCase : Optional[int] = name.replace('conv2', 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowerCAmelCase : Union[str, Any] = name.replace('pretrained.act_postprocess1.0.project.0', 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
lowerCAmelCase : Optional[Any] = name.replace('pretrained.act_postprocess2.0.project.0', 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
lowerCAmelCase : List[Any] = name.replace('pretrained.act_postprocess3.0.project.0', 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
lowerCAmelCase : Optional[Any] = name.replace('pretrained.act_postprocess4.0.project.0', 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowerCAmelCase : Tuple = name.replace('pretrained.act_postprocess1.3', 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
lowerCAmelCase : str = name.replace('pretrained.act_postprocess1.4', 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
lowerCAmelCase : int = name.replace('pretrained.act_postprocess2.3', 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
lowerCAmelCase : Optional[Any] = name.replace('pretrained.act_postprocess2.4', 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
lowerCAmelCase : List[str] = name.replace('pretrained.act_postprocess3.3', 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
lowerCAmelCase : List[str] = name.replace('pretrained.act_postprocess4.3', 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
lowerCAmelCase : List[str] = name.replace('pretrained.act_postprocess4.4', 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
lowerCAmelCase : int = name.replace('pretrained', 'dpt' )
if "bn" in name:
lowerCAmelCase : List[str] = name.replace('bn', 'batch_norm' )
if "head" in name:
lowerCAmelCase : Any = name.replace('head', 'head.head' )
if "encoder.norm" in name:
lowerCAmelCase : Dict = name.replace('encoder.norm', 'layernorm' )
if "auxlayer" in name:
lowerCAmelCase : Tuple = name.replace('auxlayer', 'auxiliary_head.head' )
if "backbone" in name:
lowerCAmelCase : Tuple = name.replace('backbone', 'backbone.bit.encoder' )
if ".." in name:
lowerCAmelCase : Optional[Any] = name.replace('..', '.' )
if "stem.conv" in name:
lowerCAmelCase : List[str] = name.replace('stem.conv', 'bit.embedder.convolution' )
if "blocks" in name:
lowerCAmelCase : Dict = name.replace('blocks', 'layers' )
if "convolution" in name and "backbone" in name:
lowerCAmelCase : Dict = name.replace('convolution', 'conv' )
if "layer" in name and "backbone" in name:
lowerCAmelCase : Dict = name.replace('layer', 'layers' )
if "backbone.bit.encoder.bit" in name:
lowerCAmelCase : List[str] = name.replace('backbone.bit.encoder.bit', 'backbone.bit' )
if "embedder.conv" in name:
lowerCAmelCase : Any = name.replace('embedder.conv', 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
lowerCAmelCase : Optional[int] = name.replace('backbone.bit.encoder.stem.norm', 'backbone.bit.embedder.norm' )
return name
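# Illustrative trace of the renaming above (a typical DPT key, not taken from a
# specific checkpoint file):
# "scratch.refinenet4.out_conv.weight"
#   -> "neck.refinenet4.out_conv.weight"              ("scratch" -> "neck")
#   -> "neck.fusion_stage.layers.0.out_conv.weight"   (refinenet4 maps to layer 0)
#   -> "neck.fusion_stage.layers.0.projection.weight" ("out_conv" -> "projection")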
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase : List[Any] = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight" )
lowerCAmelCase : int = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase : Union[str, Any] = in_proj_weight[: config.hidden_size, :]
lowerCAmelCase : Dict = in_proj_bias[: config.hidden_size]
lowerCAmelCase : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase : str = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase : List[str] = in_proj_bias[-config.hidden_size :]
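# Shape sketch for the split above (H = config.hidden_size, as implied by the
# slicing): the fused timm projection has weight (3*H, H) and bias (3*H,);
# rows [0:H] become the query, [H:2*H] the key, and [2*H:3*H] the value.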
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase : Optional[int] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase : Tuple = Image.open(requests.get(_UpperCAmelCase, stream=_UpperCAmelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase : List[str] = get_dpt_config(_UpperCAmelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
lowerCAmelCase : str = torch.load(_UpperCAmelCase, map_location='cpu' )
# remove certain keys
remove_ignore_keys_(_UpperCAmelCase )
# rename keys
for key in state_dict.copy().keys():
lowerCAmelCase : str = state_dict.pop(_UpperCAmelCase )
lowerCAmelCase : int = val
# read in qkv matrices
read_in_q_k_v(_UpperCAmelCase, _UpperCAmelCase )
# load HuggingFace model
lowerCAmelCase : int = DPTForSemanticSegmentation(_UpperCAmelCase ) if 'ade' in checkpoint_url else DPTForDepthEstimation(_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
model.eval()
# Check outputs on an image
lowerCAmelCase : str = 480 if 'ade' in checkpoint_url else 384
lowerCAmelCase : Dict = DPTImageProcessor(size=_UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = prepare_img()
lowerCAmelCase : Union[str, Any] = image_processor(_UpperCAmelCase, return_tensors='pt' )
# forward pass
lowerCAmelCase : Optional[Any] = model(**_UpperCAmelCase ).logits if 'ade' in checkpoint_url else model(**_UpperCAmelCase ).predicted_depth
if show_prediction:
lowerCAmelCase : str = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ), size=(image.size[1], image.size[0]), mode='bicubic', align_corners=_UpperCAmelCase, )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
model.push_to_hub('ybelkada/dpt-hybrid-midas' )
image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
__A : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
parser.add_argument(
'''--show_prediction''',
action='''store_true''',
)
__A : Dict = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 138 | 1 |
import argparse
import os
import re
SCREAMING_SNAKE_CASE : List[Any] = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
SCREAMING_SNAKE_CASE : Dict = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
SCREAMING_SNAKE_CASE : int = re.compile(r"\s*\(\s*\"(\S[^\"]+)\"")
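# Illustrative matches for the two patterns (made-up examples, not from a real file):
#   mapping intro: "MODEL_MAPPING_NAMES = OrderedDict("
#   identifier:    '        ("albert", "AlbertModel"),'  -> captures "albert"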
def UpperCamelCase ( _a , _a = False ) -> str:
'''simple docstring'''
with open(a__ , '''r''' , encoding='''utf-8''' ) as f:
lowercase_ :Optional[Any] = f.read()
lowercase_ :Union[str, Any] = content.split('''\n''' )
lowercase_ :Dict = []
lowercase_ :int = 0
while line_idx < len(a__ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
lowercase_ :Tuple = len(re.search(R'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
new_lines.append(lines[line_idx] )
line_idx += 1
lowercase_ :str = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
lowercase_ :Tuple = line_idx
while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
line_idx += 1
blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
lowercase_ :Tuple = sorted(a__ , key=lambda _a : _re_identifier.search(_a ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(a__ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(a__ ) )
elif "\n".join(a__ ) != content:
return True
def UpperCamelCase ( _a = False ) -> List[str]:
'''simple docstring'''
lowercase_ :List[Any] = [os.path.join(a__ , a__ ) for f in os.listdir(a__ ) if f.endswith('''.py''' )]
lowercase_ :List[str] = [sort_auto_mapping(a__ , overwrite=a__ ) for fname in fnames]
if not overwrite and any(a__ ):
lowercase_ :List[Any] = [f for f, d in zip(a__ , a__ ) if d]
raise ValueError(
f"The following files have auto mappings that need sorting: {', '.join(a__ )}. Run `make style` to fix"
''' this.''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 360 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
class UpperCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self , **UpperCamelCase_ ):
requires_backends(self , ['''bs4'''] )
super().__init__(**UpperCamelCase_ )
def UpperCamelCase ( self , UpperCamelCase_ ):
lowercase_ :Optional[int] = []
lowercase_ :Union[str, Any] = []
lowercase_ :Union[str, Any] = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
lowercase_ :Any = parent.find_all(child.name , recursive=UpperCamelCase_ )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(UpperCamelCase_ ) else next(i for i, s in enumerate(UpperCamelCase_ , 1 ) if s is child ) )
lowercase_ :str = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def UpperCamelCase ( self , UpperCamelCase_ ):
lowercase_ :Dict = BeautifulSoup(UpperCamelCase_ , '''html.parser''' )
lowercase_ :Union[str, Any] = []
lowercase_ :Union[str, Any] = []
lowercase_ :List[Any] = []
for element in html_code.descendants:
if type(UpperCamelCase_ ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
lowercase_ :Dict = html.unescape(UpperCamelCase_ ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(UpperCamelCase_ )
lowercase_ , lowercase_ :Tuple = self.xpath_soup(UpperCamelCase_ )
stringaxtag_seq.append(UpperCamelCase_ )
stringaxsubs_seq.append(UpperCamelCase_ )
if len(UpperCamelCase_ ) != len(UpperCamelCase_ ):
raise ValueError('''Number of doc strings and xtags does not correspond''' )
if len(UpperCamelCase_ ) != len(UpperCamelCase_ ):
raise ValueError('''Number of doc strings and xsubs does not correspond''' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ):
lowercase_ :Union[str, Any] = ''''''
for tagname, subs in zip(UpperCamelCase_ , UpperCamelCase_ ):
xpath += f"/{tagname}"
if subs != 0:
xpath += f"[{subs}]"
return xpath
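# Illustrative: joining (["html", "body", "div"], [0, 0, 2]) as above yields
# "/html/body/div[2]"; a subscript of 0 (a tag with no same-name siblings)
# emits no "[n]" suffix.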
def __call__( self , UpperCamelCase_ ):
lowercase_ :Dict = False
# Check that strings has a valid type
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowercase_ :Optional[Any] = True
elif isinstance(UpperCamelCase_ , (list, tuple) ):
if len(UpperCamelCase_ ) == 0 or isinstance(html_strings[0] , UpperCamelCase_ ):
lowercase_ :Tuple = True
if not valid_strings:
raise ValueError(
'''HTML strings must be of type `str`, `List[str]` (batch of examples), '''
f"but is of type {type(UpperCamelCase_ )}." )
lowercase_ :List[Any] = bool(isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(html_strings[0] , UpperCamelCase_ )) )
if not is_batched:
lowercase_ :Dict = [html_strings]
# Get nodes + xpaths
lowercase_ :List[Any] = []
lowercase_ :List[str] = []
for html_string in html_strings:
lowercase_ , lowercase_ , lowercase_ :List[str] = self.get_three_from_single(UpperCamelCase_ )
nodes.append(UpperCamelCase_ )
lowercase_ :str = []
for node, tag_list, sub_list in zip(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
lowercase_ :str = self.construct_xpath(UpperCamelCase_ , UpperCamelCase_ )
xpath_strings.append(UpperCamelCase_ )
xpaths.append(UpperCamelCase_ )
# return as Dict
lowercase_ :int = {'''nodes''': nodes, '''xpaths''': xpaths}
lowercase_ :Optional[int] = BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
return encoded_inputs
| 252 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
def __init__( self , a__ , a__=13 , a__=32 , a__=2 , a__=3 , a__=16 , a__=[32, 64, 128] , a__=[1, 2, 1] , a__=[2, 2, 4] , a__=2 , a__=2.0 , a__=True , a__=0.0 , a__=0.0 , a__=0.1 , a__="gelu" , a__=False , a__=True , a__=0.0_2 , a__=1e-5 , a__=True , a__=None , a__=True , a__=10 , a__=8 , a__=["stage1", "stage2"] , a__=[1, 2] , ) -> Any:
'''simple docstring'''
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = embed_dim
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = num_heads
snake_case_ = window_size
snake_case_ = mlp_ratio
snake_case_ = qkv_bias
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = drop_path_rate
snake_case_ = hidden_act
snake_case_ = use_absolute_embeddings
snake_case_ = patch_norm
snake_case_ = layer_norm_eps
snake_case_ = initializer_range
snake_case_ = is_training
snake_case_ = scope
snake_case_ = use_labels
snake_case_ = type_sequence_label_size
snake_case_ = encoder_stride
snake_case_ = out_features
snake_case_ = out_indices
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = FocalNetModel(config=a__ )
model.to(a__ )
model.eval()
snake_case_ = model(a__ )
snake_case_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
snake_case_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> Dict:
'''simple docstring'''
snake_case_ = FocalNetBackbone(config=a__ )
model.to(a__ )
model.eval()
snake_case_ = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
snake_case_ = None
snake_case_ = FocalNetBackbone(config=a__ )
model.to(a__ )
model.eval()
snake_case_ = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> Optional[int]:
'''simple docstring'''
snake_case_ = FocalNetForMaskedImageModeling(config=a__ )
model.to(a__ )
model.eval()
snake_case_ = model(a__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case_ = 1
snake_case_ = FocalNetForMaskedImageModeling(a__ )
model.to(a__ )
model.eval()
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(a__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> List[str]:
'''simple docstring'''
snake_case_ = self.type_sequence_label_size
snake_case_ = FocalNetForImageClassification(a__ )
model.to(a__ )
model.eval()
snake_case_ = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case_ = 1
snake_case_ = FocalNetForImageClassification(a__ )
model.to(a__ )
model.eval()
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( lowercase_ , lowercase_ , unittest.TestCase ):
lowerCAmelCase_ : Union[str, Any] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ : Union[str, Any] = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ : Tuple = False
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : List[Any] = False
lowerCAmelCase_ : Union[str, Any] = False
lowerCAmelCase_ : Any = False
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ = FocalNetModelTester(self )
snake_case_ = ConfigTester(self , config_class=a__ , embed_dim=37 , has_text_modality=a__ )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a__ )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a__ )
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
snake_case_ = model_class(a__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , nn.Linear ) )
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
snake_case_ = model_class(a__ )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a__ )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ ) -> str:
'''simple docstring'''
snake_case_ = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(a__ , a__ ) )
snake_case_ = outputs.hidden_states
snake_case_ = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a__ ) , a__ )
# FocalNet has a different seq_length
snake_case_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
snake_case_ = outputs.reshaped_hidden_states
self.assertEqual(len(a__ ) , a__ )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = reshaped_hidden_states[0].shape
snake_case_ = (
reshaped_hidden_states[0].view(a__ , a__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
snake_case_ = True
self.check_hidden_states_output(a__ , a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
self.check_hidden_states_output(a__ , a__ , a__ , a__ )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = 3
snake_case_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
snake_case_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
snake_case_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
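# e.g. a 32x32 image with 2x2 patches gives 32 + 2 - (32 % 2) = 34: a full
# extra patch is added even when the image size already divides evenly.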
for model_class in self.all_model_classes[:-1]:
snake_case_ = True
self.check_hidden_states_output(a__ , a__ , a__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
self.check_hidden_states_output(a__ , a__ , a__ , (padded_height, padded_width) )
@slow
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = FocalNetModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = _config_zero_init(a__ )
for model_class in self.all_model_classes:
snake_case_ = model_class(config=a__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(a__ )
snake_case_ = self.default_image_processor
snake_case_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
snake_case_ = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
# forward pass
with torch.no_grad():
snake_case_ = model(**a__ )
# verify the logits
snake_case_ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , a__ )
snake_case_ = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class _snake_case ( lowercase_ , unittest.TestCase ):
lowerCAmelCase_ : Optional[Any] = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase_ : List[Any] = FocalNetConfig
lowerCAmelCase_ : Tuple = False
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = FocalNetModelTester(self )
| 85 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE : Tuple = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["ConditionalDetrFeatureExtractor"]
_SCREAMING_SNAKE_CASE : List[Any] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
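# Note: with this layout, importing the package only builds the _LazyModule
# shim; the heavy (torch / vision) submodules are imported on first attribute
# access, e.g. when `ConditionalDetrModel` is actually looked up.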
| 85 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowercase( UpperCamelCase_ , UpperCamelCase_=False ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("""head""" ):
UpperCamelCase = """segformer.encoder.""" + key
if key.startswith("""backbone""" ):
UpperCamelCase = key.replace("""backbone""" , """segformer.encoder""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
UpperCamelCase = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
UpperCamelCase = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(UpperCamelCase_ )-1}""" )
if "norm" in key:
UpperCamelCase = key.replace("""norm""" , """layer_norm""" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
UpperCamelCase = key[key.find("""segformer.encoder.layer_norm""" ) + len("""segformer.encoder.layer_norm""" )]
UpperCamelCase = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(UpperCamelCase_ )-1}""" )
if "layer_norm1" in key:
UpperCamelCase = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
UpperCamelCase = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
UpperCamelCase = key[key.find("""block""" ) + len("""block""" )]
UpperCamelCase = key.replace(f"""block{idx}""" , f"""block.{int(UpperCamelCase_ )-1}""" )
if "attn.q" in key:
UpperCamelCase = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
UpperCamelCase = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
UpperCamelCase = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
UpperCamelCase = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
UpperCamelCase = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
UpperCamelCase = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
UpperCamelCase = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
UpperCamelCase = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
UpperCamelCase = key[key.find("""linear_c""" ) + len("""linear_c""" )]
UpperCamelCase = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(UpperCamelCase_ )-1}""" )
if key.startswith("""head""" ):
UpperCamelCase = key.replace("""head""" , """classifier""" )
UpperCamelCase = value
return new_state_dict
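# Illustrative trace for a typical checkpoint key (with encoder_only=False):
# "backbone.patch_embed1.proj.weight"
#   -> "segformer.encoder.patch_embed1.proj.weight"        ("backbone" prefix)
#   -> "segformer.encoder.patch_embeddings.0.proj.weight"  (1-indexed -> 0-indexed)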
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> str:
'''simple docstring'''
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
UpperCamelCase = state_dict.pop(f"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" )
UpperCamelCase = state_dict.pop(f"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
UpperCamelCase = kv_weight[
: config.hidden_sizes[i], :
]
UpperCamelCase = kv_bias[: config.hidden_sizes[i]]
UpperCamelCase = kv_weight[
config.hidden_sizes[i] :, :
]
UpperCamelCase = kv_bias[
config.hidden_sizes[i] :
]
def lowercase( ) -> int:
'''simple docstring'''
UpperCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCamelCase = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
return image
@torch.no_grad()
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
'''simple docstring'''
UpperCamelCase = SegformerConfig()
UpperCamelCase = False
# set attributes based on model_name
UpperCamelCase = """huggingface/label-files"""
if "segformer" in model_name:
UpperCamelCase = model_name[len("""segformer.""" ) : len("""segformer.""" ) + 2]
if "ade" in model_name:
UpperCamelCase = 150
UpperCamelCase = """ade20k-id2label.json"""
UpperCamelCase = (1, 150, 128, 128)
elif "city" in model_name:
UpperCamelCase = 19
UpperCamelCase = """cityscapes-id2label.json"""
UpperCamelCase = (1, 19, 128, 128)
else:
raise ValueError(f"""Model {model_name} not supported""" )
elif "mit" in model_name:
UpperCamelCase = True
UpperCamelCase = model_name[4:6]
UpperCamelCase = 1000
UpperCamelCase = """imagenet-1k-id2label.json"""
UpperCamelCase = (1, 1000)
else:
raise ValueError(f"""Model {model_name} not supported""" )
# set config attributes
UpperCamelCase = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type="""dataset""" ) , """r""" ) )
UpperCamelCase = {int(UpperCamelCase_ ): v for k, v in idalabel.items()}
UpperCamelCase = idalabel
UpperCamelCase = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
UpperCamelCase = [64, 128, 320, 512]
UpperCamelCase = 256
elif size == "b2":
UpperCamelCase = [64, 128, 320, 512]
UpperCamelCase = 768
UpperCamelCase = [3, 4, 6, 3]
elif size == "b3":
UpperCamelCase = [64, 128, 320, 512]
UpperCamelCase = 768
UpperCamelCase = [3, 4, 18, 3]
elif size == "b4":
UpperCamelCase = [64, 128, 320, 512]
UpperCamelCase = 768
UpperCamelCase = [3, 8, 27, 3]
elif size == "b5":
UpperCamelCase = [64, 128, 320, 512]
UpperCamelCase = 768
UpperCamelCase = [3, 6, 40, 3]
else:
raise ValueError(f"""Size {size} not supported""" )
# load image processor (only resize + normalize)
UpperCamelCase = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=UpperCamelCase_ , align=UpperCamelCase_ , do_random_crop=UpperCamelCase_ )
# prepare image
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=UpperCamelCase_ , return_tensors="""pt""" ).pixel_values
logger.info(f"""Converting model {model_name}...""" )
# load original state dict
if encoder_only:
UpperCamelCase = torch.load(UpperCamelCase_ , map_location=torch.device("""cpu""" ) )
else:
UpperCamelCase = torch.load(UpperCamelCase_ , map_location=torch.device("""cpu""" ) )["""state_dict"""]
# rename keys
UpperCamelCase = rename_keys(UpperCamelCase_ , encoder_only=UpperCamelCase_ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(UpperCamelCase_ , UpperCamelCase_ )
# create HuggingFace model and load state dict
if encoder_only:
UpperCamelCase = False
UpperCamelCase = SegformerForImageClassification(UpperCamelCase_ )
else:
UpperCamelCase = SegformerForSemanticSegmentation(UpperCamelCase_ )
model.load_state_dict(UpperCamelCase_ )
model.eval()
# forward pass
UpperCamelCase = model(UpperCamelCase_ )
UpperCamelCase = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
UpperCamelCase = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
UpperCamelCase = torch.tensor(
[
[[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -10.3529, -10.0304], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
UpperCamelCase = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
UpperCamelCase = torch.tensor(
[
[[-9.0_8_7_8, -10.2081, -10.1891], [-9.3_1_4_4, -10.7941, -10.9843], [-9.2_2_9_4, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
UpperCamelCase = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
UpperCamelCase = torch.tensor(
[
[[-9.5_5_2_4, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5_8_4_2, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
UpperCamelCase = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
UpperCamelCase = torch.tensor(
[
[[-7.8_2_1_7, -9.8_7_6_7, -10.1717], [-9.4_4_3_8, -10.9058, -11.4047], [-9.7_9_3_9, -12.3495, -12.1079]],
[[-7.1_5_1_4, -9.5_3_3_6, -10.0860], [-9.7_7_7_6, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
UpperCamelCase = torch.tensor(
[
[
[-1.1372E01, -1.2787E01, -1.3477E01],
[-1.2536E01, -1.4194E01, -1.4409E01],
[-1.3217E01, -1.4888E01, -1.5327E01],
],
[
[-1.4791E01, -1.7122E01, -1.8277E01],
[-1.7163E01, -1.9192E01, -1.9533E01],
[-1.7897E01, -1.9991E01, -2.0315E01],
],
[
[7.6723E-01, 4.1921E-01, -7.7878E-02],
[4.7772E-01, 9.5557E-03, -2.8082E-01],
[3.6032E-01, -2.4826E-01, -5.1168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
UpperCamelCase = torch.tensor(
[
[[-9.4_9_5_9, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8_9_0_5, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
UpperCamelCase = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
UpperCamelCase = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
UpperCamelCase = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
UpperCamelCase = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
UpperCamelCase = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]],
] )
else:
UpperCamelCase = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase_ , atol=1E-2 )
# finally, save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
model.save_pretrained(UpperCamelCase_ )
image_processor.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 353 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
def __init__( self : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] ):
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
UpperCamelCase = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__( self : Union[str, Any] , lowerCamelCase_ : int = 1 , lowerCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : int = 50 , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , ):
"""simple docstring"""
if isinstance(self.unet.config.sample_size , lowerCamelCase_ ):
UpperCamelCase = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
UpperCamelCase = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
UpperCamelCase = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(lowerCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCamelCase = self.unet(lowerCamelCase_ , lowerCamelCase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
UpperCamelCase = self.scheduler.step(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , eta=lowerCamelCase_ , use_clipped_model_output=lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase = self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase_ )
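# Minimal usage sketch (hedged: the `DDIMPipeline` name and checkpoint id are
# assumptions, not taken from this file):
#
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, eta=0.0, num_inference_steps=50).images[0]
#
# eta=0.0 yields deterministic DDIM sampling; larger eta re-injects noise,
# approaching DDPM-like behaviour at eta=1.0.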
| 165 | 0 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def lowercase__ ( __snake_case : Optional[int] ): # picklable for multiprocessing
'''simple docstring'''
return __snake_case + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def lowercase__ ( ):
'''simple docstring'''
with parallel_backend('spark' ):
assert ParallelBackendConfig.backend_name == "spark"
UpperCAmelCase_ : str = [1, 2, 3]
with pytest.raises(__snake_case ):
with parallel_backend('unsupported backend' ):
map_nested(__snake_case , __snake_case , num_proc=2 )
with pytest.raises(__snake_case ):
with parallel_backend('unsupported backend' ):
map_nested(__snake_case , __snake_case , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' , [2, -1] )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Dict = [1, 2]
UpperCAmelCase_ : Any = {'a': 1, 'b': 2}
UpperCAmelCase_ : Any = {'a': [1, 2], 'b': [3, 4]}
UpperCAmelCase_ : Optional[Any] = {'a': {'1': 1}, 'b': 2}
UpperCAmelCase_ : Optional[int] = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
UpperCAmelCase_ : Tuple = [2, 3]
UpperCAmelCase_ : Optional[int] = {'a': 2, 'b': 3}
UpperCAmelCase_ : Optional[int] = {'a': [2, 3], 'b': [4, 5]}
UpperCAmelCase_ : int = {'a': {'1': 2}, 'b': 3}
UpperCAmelCase_ : str = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
with parallel_backend('spark' ):
assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa
assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa
assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa
assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa
assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa
| 29 |
from typing import Any
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> list:
"""simple docstring"""
_validation(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
# Creates data structures and fill initial step
_lowercase ={}
_lowercase ={}
for state in states_space:
_lowercase =observations_space[0]
_lowercase =(
initial_probabilities[state] * emission_probabilities[state][observation]
)
_lowercase =None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(__snake_case ) ):
_lowercase =observations_space[o]
_lowercase =observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
_lowercase =''''''
_lowercase =-1
for k_state in states_space:
_lowercase =(
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
_lowercase =probability
_lowercase =k_state
# Update probabilities and pointers dicts
_lowercase =(
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
_lowercase =arg_max
# The final observation
_lowercase =observations_space[len(__snake_case ) - 1]
# argmax for given final observation
_lowercase =''''''
_lowercase =-1
for k_state in states_space:
_lowercase =probabilities[(k_state, final_observation)]
if probability > max_probability:
_lowercase =probability
_lowercase =k_state
_lowercase =arg_max
# Process pointers backwards
_lowercase =last_state
_lowercase =[]
for o in range(len(__snake_case ) - 1 , -1 , -1 ):
result.append(__snake_case )
_lowercase =pointers[previous, observations_space[o]]
result.reverse()
return result
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> None:
"""simple docstring"""
_validate_not_empty(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
_validate_lists(__snake_case , __snake_case )
_validate_dicts(
__snake_case , __snake_case , __snake_case )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> None:
"""simple docstring"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None:
"""simple docstring"""
_validate_list(__snake_case , '''observations_space''' )
_validate_list(__snake_case , '''states_space''' )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None:
"""simple docstring"""
if not isinstance(_object , __snake_case ):
_lowercase =F"{var_name} must be a list"
raise ValueError(__snake_case )
else:
for x in _object:
if not isinstance(__snake_case , __snake_case ):
_lowercase =F"{var_name} must be a list of strings"
raise ValueError(__snake_case )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , ) -> None:
"""simple docstring"""
_validate_dict(__snake_case , '''initial_probabilities''' , __snake_case )
_validate_nested_dict(__snake_case , '''transition_probabilities''' )
_validate_nested_dict(__snake_case , '''emission_probabilities''' )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None:
"""simple docstring"""
_validate_dict(_object , __snake_case , __snake_case )
for x in _object.values():
_validate_dict(__snake_case , __snake_case , __snake_case , __snake_case )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case = False ) -> None:
"""simple docstring"""
if not isinstance(_object , __snake_case ):
_lowercase =F"{var_name} must be a dict"
raise ValueError(__snake_case )
if not all(isinstance(__snake_case , __snake_case ) for x in _object ):
_lowercase =F"{var_name} all keys must be strings"
raise ValueError(__snake_case )
if not all(isinstance(__snake_case , __snake_case ) for x in _object.values() ):
_lowercase ='''nested dictionary ''' if nested else ''''''
_lowercase =F"{var_name} {nested_text}all values must be {value_type.__name__}"
raise ValueError(__snake_case )
if __name__ == "__main__":
from doctest import testmod
testmod()
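# A compact, descriptively named sketch of the Viterbi recurrence implemented
# above (validation helpers omitted; dictionaries are keyed by step index so
# repeated observations are handled correctly).
def viterbi(observations, states, start_p, trans_p, emit_p):
    # probs[(state, t)] is the probability of the best path ending in `state` at step t
    probs = {(s, 0): start_p[s] * emit_p[s][observations[0]] for s in states}
    pointers = {}
    for t in range(1, len(observations)):
        obs = observations[t]
        for s in states:
            best = max(states, key=lambda k: probs[(k, t - 1)] * trans_p[k][s])
            probs[(s, t)] = probs[(best, t - 1)] * trans_p[best][s] * emit_p[s][obs]
            pointers[(s, t)] = best
    # backtrack from the most likely final state
    state = max(states, key=lambda k: probs[(k, len(observations) - 1)])
    path = [state]
    for t in range(len(observations) - 1, 0, -1):
        state = pointers[(state, t)]
        path.append(state)
    return list(reversed(path))

# classic healthy/fever example
states = ("healthy", "fever")
observations = ("normal", "cold", "dizzy")
start_p = {"healthy": 0.6, "fever": 0.4}
trans_p = {"healthy": {"healthy": 0.7, "fever": 0.3}, "fever": {"healthy": 0.4, "fever": 0.6}}
emit_p = {
    "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
print(viterbi(observations, states, start_p, trans_p, emit_p))  # ['healthy', 'healthy', 'fever']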
| 5 | 0 |
def UpperCamelCase ( _A = 1, _A = 1000 ):
"""simple docstring"""
__magic_name__ : Optional[int] = 1
__magic_name__ : Dict = 0
for divide_by_number in range(_A, digit + 1 ):
__magic_name__ : list[int] = []
__magic_name__ : Any = numerator
for _ in range(1, digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(_A ):
__magic_name__ : int = len(_A )
__magic_name__ : Dict = divide_by_number
else:
has_been_divided.append(_A )
__magic_name__ : Optional[int] = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
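# A descriptively named sketch of the same search: long-divide numerator by each
# denominator, tracking remainders until one repeats; the longest remainder chain
# marks the denominator with the longest recurring decimal cycle.
def longest_recurring_cycle(numerator: int = 1, digit: int = 1000) -> int:
    best_denominator = 1
    longest_chain = 0
    for denominator in range(numerator, digit + 1):
        seen_remainders: list[int] = []
        remainder = numerator
        for _ in range(1, digit + 1):
            if remainder in seen_remainders:
                if longest_chain < len(seen_remainders):
                    longest_chain = len(seen_remainders)
                    best_denominator = denominator
                break
            seen_remainders.append(remainder)
            remainder = remainder * 10 % denominator
    return best_denominator

print(longest_recurring_cycle())  # 983, the Project Euler 26 answer for d <= 1000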
| 350 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
lowercase__ : str = StableUnCLIPImgaImgPipeline
lowercase__ : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowercase__ : Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase__ : Union[str, Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase__ : Union[str, Any] = frozenset([] )
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Any = 32
__magic_name__ : Union[str, Any] = embedder_hidden_size
# image encoding components
__magic_name__ : Optional[int] = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
__magic_name__ : Optional[Any] = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCAmelCase__ , projection_dim=lowerCAmelCase__ , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
__magic_name__ : Any = StableUnCLIPImageNormalizer(embedding_dim=lowerCAmelCase__ )
__magic_name__ : int = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
__magic_name__ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
__magic_name__ : Optional[Any] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__magic_name__ : str = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCAmelCase__ , layers_per_block=1 , upcast_attention=lowerCAmelCase__ , use_linear_projection=lowerCAmelCase__ , )
torch.manual_seed(0 )
__magic_name__ : Optional[Any] = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type="""v_prediction""" , set_alpha_to_one=lowerCAmelCase__ , steps_offset=1 , )
torch.manual_seed(0 )
__magic_name__ : List[str] = AutoencoderKL()
__magic_name__ : List[str] = {
# image encoding components
"""feature_extractor""": feature_extractor,
"""image_encoder""": image_encoder.eval(),
# image noising components
"""image_normalizer""": image_normalizer.eval(),
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder.eval(),
"""unet""": unet.eval(),
"""scheduler""": scheduler,
"""vae""": vae.eval(),
}
return components
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=0 , lowerCAmelCase__=True ) -> List[Any]:
if str(lowerCAmelCase__ ).startswith("""mps""" ):
__magic_name__ : Optional[int] = torch.manual_seed(lowerCAmelCase__ )
else:
__magic_name__ : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__magic_name__ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
if pil_image:
__magic_name__ : Optional[Any] = input_image * 0.5 + 0.5
__magic_name__ : int = input_image.clamp(0 , 1 )
__magic_name__ : Union[str, Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__magic_name__ : Optional[int] = DiffusionPipeline.numpy_to_pil(lowerCAmelCase__ )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : List[str] = self.get_dummy_components()
__magic_name__ : int = StableUnCLIPImgaImgPipeline(**lowerCAmelCase__ )
__magic_name__ : List[str] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__magic_name__ : Any = self.get_dummy_inputs(lowerCAmelCase__ )
inputs.update({"""image_embeds""": None} )
__magic_name__ : List[str] = sd_pipe(**lowerCAmelCase__ ).images
__magic_name__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__magic_name__ : int = np.array([0.3_8_7_2, 0.7_2_2_4, 0.5_6_0_1, 0.4_7_4_1, 0.6_8_7_2, 0.5_8_1_4, 0.4_6_3_6, 0.3_8_6_7, 0.5_0_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __magic_name__ ( self ) -> Dict:
__magic_name__ : int = torch_device in ["""cpu""", """mps"""]
self._test_attention_slicing_forward_pass(test_max_difference=lowerCAmelCase__ )
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : Tuple = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=lowerCAmelCase__ )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __magic_name__ ( self ) -> str:
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCAmelCase__ )
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
__magic_name__ : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy""" )
__magic_name__ : List[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-l-img2img""" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__magic_name__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        __magic_name__ : Union[str, Any] = pipe(lowerCAmelCase__ , """anime turtle""" , generator=lowerCAmelCase__ , output_type="""np""" )
__magic_name__ : Optional[Any] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
__magic_name__ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy""" )
__magic_name__ : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__magic_name__ : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        __magic_name__ : Any = pipe(lowerCAmelCase__ , """anime turtle""" , generator=lowerCAmelCase__ , output_type="""np""" )
__magic_name__ : Any = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__ ( self ) -> str:
__magic_name__ : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__magic_name__ : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
__magic_name__ : int = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__magic_name__ : List[Any] = pipe(
lowerCAmelCase__ , """anime turtle""" , num_inference_steps=2 , output_type="""np""" , )
__magic_name__ : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 138 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'fnet'
def __init__(self , _lowerCamelCase=32000 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu_new" , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=4 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-12 , _lowerCamelCase=False , _lowerCamelCase=512 , _lowerCamelCase=3 , _lowerCamelCase=1 , _lowerCamelCase=2 , **_lowerCamelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = vocab_size
UpperCAmelCase__ : List[str] = max_position_embeddings
UpperCAmelCase__ : Tuple = hidden_size
UpperCAmelCase__ : str = num_hidden_layers
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Any = hidden_act
UpperCAmelCase__ : int = hidden_dropout_prob
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : List[str] = type_vocab_size
UpperCAmelCase__ : Dict = layer_norm_eps
UpperCAmelCase__ : int = use_tpu_fourier_optimizations
UpperCAmelCase__ : Any = tpu_short_seq_length
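# A minimal usage sketch (requires `transformers` with PyTorch installed; the
# argument values shown are the defaults and purely illustrative).
from transformers import FNetConfig, FNetModel

configuration = FNetConfig(vocab_size=32000, hidden_size=768, num_hidden_layers=12)
model = FNetModel(configuration)  # randomly initialised weights
print(model.config.hidden_act)  # "gelu_new"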
| 171 |
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowerCamelCase :
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=32 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=16 , _lowerCamelCase=[1, 2, 1] , _lowerCamelCase=[2, 2, 4] , _lowerCamelCase=2 , _lowerCamelCase=2.0 , _lowerCamelCase=True , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase="gelu" , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=0.02 , _lowerCamelCase=1e-5 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=10 , _lowerCamelCase=8 , _lowerCamelCase=["stage1", "stage2", "stage3"] , _lowerCamelCase=[1, 2, 3] , ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = parent
UpperCAmelCase__ : Union[str, Any] = batch_size
UpperCAmelCase__ : Optional[int] = image_size
UpperCAmelCase__ : Tuple = patch_size
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Dict = embed_dim
UpperCAmelCase__ : List[Any] = depths
UpperCAmelCase__ : Dict = num_heads
UpperCAmelCase__ : Any = window_size
UpperCAmelCase__ : str = mlp_ratio
UpperCAmelCase__ : str = qkv_bias
UpperCAmelCase__ : int = hidden_dropout_prob
UpperCAmelCase__ : Tuple = attention_probs_dropout_prob
UpperCAmelCase__ : Dict = drop_path_rate
UpperCAmelCase__ : Union[str, Any] = hidden_act
UpperCAmelCase__ : Union[str, Any] = use_absolute_embeddings
UpperCAmelCase__ : Optional[int] = patch_norm
UpperCAmelCase__ : Any = layer_norm_eps
UpperCAmelCase__ : Dict = initializer_range
UpperCAmelCase__ : Tuple = is_training
UpperCAmelCase__ : Union[str, Any] = scope
UpperCAmelCase__ : int = use_labels
UpperCAmelCase__ : Optional[int] = type_sequence_label_size
UpperCAmelCase__ : Tuple = encoder_stride
UpperCAmelCase__ : Optional[int] = out_features
UpperCAmelCase__ : str = out_indices
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Dict = None
if self.use_labels:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _a (self ):
"""simple docstring"""
return MaskFormerSwinConfig(
        image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = MaskFormerSwinModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : Dict = model(_lowerCamelCase )
UpperCAmelCase__ : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase__ : List[str] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = MaskFormerSwinBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : Tuple = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_lowerCamelCase ):
UpperCAmelCase__ : Union[str, Any] = ["""stem"""]
UpperCAmelCase__ : List[Any] = MaskFormerSwinBackbone(config=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : str = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = config_and_inputs
UpperCAmelCase__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = MaskFormerSwinModelTester(self )
UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=_lowerCamelCase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def _a (self ):
"""simple docstring"""
pass
def _a (self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a (self ):
"""simple docstring"""
return
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCamelCase )
@unittest.skip("""Swin does not use inputs_embeds""" )
def _a (self ):
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def _a (self ):
"""simple docstring"""
pass
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[int] = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear ) )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[int] = model_class(_lowerCamelCase )
UpperCAmelCase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Any = [*signature.parameters.keys()]
UpperCAmelCase__ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def _a (self ):
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def _a (self ):
"""simple docstring"""
pass
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
UpperCAmelCase__ : str = outputs.hidden_states
UpperCAmelCase__ : str = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
# Swin has a different seq_length
UpperCAmelCase__ : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : Optional[Any] = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Tuple = 3
UpperCAmelCase__ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase__ : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase__ : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase__ : Union[str, Any] = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : List[Any] = True
self.check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def _a (self ):
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _a (self ):
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _a (self ):
"""simple docstring"""
pass
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_lowerCamelCase ):
UpperCAmelCase__ : Optional[int] = 0
return t
def check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase={} ):
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase )
UpperCAmelCase__ : str = model(**_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase ).to_tuple()
def recursive_check(_lowerCamelCase , _lowerCamelCase ):
if isinstance(_lowerCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowerCamelCase , _lowerCamelCase ):
recursive_check(_lowerCamelCase , _lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_lowerCamelCase , _lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_lowerCamelCase ) , set_nan_tensor_to_zero(_lowerCamelCase ) , atol=1e-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
F""" {torch.isnan(_lowerCamelCase ).any()} and `inf`: {torch.isinf(_lowerCamelCase )}. Dict has"""
F""" `nan`: {torch.isnan(_lowerCamelCase ).any()} and `inf`: {torch.isinf(_lowerCamelCase )}."""
) , )
recursive_check(_lowerCamelCase , _lowerCamelCase )
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[int] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCAmelCase__ : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Dict = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
UpperCAmelCase__ : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Optional[int] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} )
UpperCAmelCase__ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
UpperCAmelCase__ : List[str] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} )
@require_torch
class lowerCamelCase ( unittest.TestCase , lowerCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (MaskFormerSwinBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE = MaskFormerSwinConfig
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = MaskFormerSwinModelTester(self )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[int] = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
UpperCAmelCase__ : Tuple = backbone_class(_lowerCamelCase )
backbone.to(_lowerCamelCase )
backbone.eval()
UpperCAmelCase__ : int = backbone(**_lowerCamelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _lowerCamelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
UpperCAmelCase__ : List[str] = backbone(**_lowerCamelCase , output_hidden_states=_lowerCamelCase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
UpperCAmelCase__ : List[str] = backbone(**_lowerCamelCase , output_attentions=_lowerCamelCase )
self.assertIsNotNone(outputs.attentions )
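# A short sketch of the backbone contract these tests verify: one feature map
# per requested stage, each shaped (batch, channels, height, width). Input size
# and stage names below are illustrative and assume the default configuration.
import torch
from transformers import MaskFormerSwinBackbone, MaskFormerSwinConfig

config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3"])
backbone = MaskFormerSwinBackbone(config).eval()
with torch.no_grad():
    outputs = backbone(torch.randn(1, 3, 224, 224))
for feature_map, num_channels in zip(outputs.feature_maps, backbone.channels):
    assert feature_map.shape[:2] == (1, num_channels)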
| 171 | 1 |
import random
def _a ( lowerCamelCase: int , lowerCamelCase: float , lowerCamelCase: bool = False ) -> dict:
'''simple docstring'''
__A = {i: [] for i in range(lowerCamelCase )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(lowerCamelCase )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes, add an edge from u to v
    # if the randomly generated number is smaller than the given probability
for i in range(lowerCamelCase ):
for j in range(i + 1 , lowerCamelCase ):
if random.random() < probability:
graph[i].append(lowerCamelCase )
if not directed:
                # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(lowerCamelCase )
return graph
def _a ( lowerCamelCase: int ) -> dict:
'''simple docstring'''
return {
i: [j for j in range(lowerCamelCase ) if i != j] for i in range(lowerCamelCase )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
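# A self-contained, descriptively named sketch of the generator above
# (the complete-graph shortcut for probability >= 1 is omitted for brevity).
import random

def random_graph(nodes: int, probability: float, directed: bool = False) -> dict:
    """Erdos-Renyi style random graph as an adjacency-list dictionary."""
    graph: dict[int, list[int]] = {i: [] for i in range(nodes)}
    for i in range(nodes):
        for j in range(i + 1, nodes):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    graph[j].append(i)
    return graph

random.seed(1)
print(random_graph(4, 0.5))  # e.g. {0: [1], 1: [0, 2, 3], 2: [1, 3], 3: [1, 2]} with this seed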
| 358 |
import math
def _a ( lowerCamelCase: int ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All remaining primes are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _a ( lowerCamelCase: float = 0.1 ) -> int:
'''simple docstring'''
__A = 3
__A = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
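# The two functions above pair a 6k +/- 1 trial-division primality test with
# the Project Euler 58 search: grow a square number spiral until the fraction
# of primes on its diagonals drops below `ratio` (for ratio 0.1 the published
# answer is 26241). A self-contained sketch of the primality test:
import math

def is_prime(number: int) -> bool:
    """Trial division checking 2, 3 and then only candidates of the form 6k +/- 1."""
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

assert [n for n in range(2, 30) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]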
| 250 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCAmelCase ( a__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = "openai/whisper-base"
SCREAMING_SNAKE_CASE = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
SCREAMING_SNAKE_CASE = "transcriber"
SCREAMING_SNAKE_CASE = WhisperProcessor
SCREAMING_SNAKE_CASE = WhisperForConditionalGeneration
SCREAMING_SNAKE_CASE = ["audio"]
SCREAMING_SNAKE_CASE = ["text"]
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Any:
return self.pre_processor(__lowerCAmelCase , return_tensors='''pt''' ).input_features
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Dict:
return self.model.generate(inputs=__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Optional[Any]:
return self.pre_processor.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )[0]
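# A usage sketch, assuming the transformers tools interface: `load_tool`
# resolves the "transcriber" task name to this class, and the input is a raw
# waveform (here one second of silence at 16 kHz, purely illustrative).
import numpy as np
from transformers import load_tool

transcriber = load_tool("transcriber")
audio = np.zeros(16000, dtype=np.float32)
print(transcriber(audio))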
| 198 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__a: Optional[Any] = 16
__a: Any = 32
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase = 16 ):
lowercase__ : Dict = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase__ : Optional[int] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : Optional[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=UpperCAmelCase , max_length=UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ : List[Any] = datasets.map(
UpperCAmelCase , batched=UpperCAmelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : str = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ : List[str] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ : List[str] = 16
elif accelerator.mixed_precision != "no":
lowercase__ : Dict = 8
else:
lowercase__ : Optional[int] = None
return tokenizer.pad(
UpperCAmelCase , padding='''longest''' , max_length=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase__ : str = DataLoader(
tokenized_datasets['''train'''] , shuffle=UpperCAmelCase , collate_fn=UpperCAmelCase , batch_size=UpperCAmelCase )
lowercase__ : Optional[int] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=UpperCAmelCase , collate_fn=UpperCAmelCase , batch_size=UpperCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__a: Tuple = mocked_dataloaders # noqa: F811
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , UpperCAmelCase ) == "1":
lowercase__ : Optional[int] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
lowercase__ : Union[str, Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
lowercase__ : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : int = config['''lr''']
lowercase__ : Optional[int] = int(config['''num_epochs'''] )
lowercase__ : Optional[Any] = int(config['''seed'''] )
lowercase__ : int = int(config['''batch_size'''] )
set_seed(UpperCAmelCase )
lowercase__ , lowercase__ : str = get_dataloaders(UpperCAmelCase , UpperCAmelCase )
lowercase__ : Union[str, Any] = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
lowercase__ : Any = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowercase__ : Union[str, Any] = batch_size // MAX_GPU_BATCH_SIZE
lowercase__ : Any = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ : List[str] = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ : List[Any] = AdamW(params=model.parameters() , lr=UpperCAmelCase )
# Instantiate scheduler
lowercase__ : List[str] = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = accelerator.prepare(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
lowercase__ : Optional[Any] = os.path.split(UpperCAmelCase )[-1].split('''.''' )[0]
accelerator.init_trackers(UpperCAmelCase , UpperCAmelCase )
# Now we train the model
for epoch in range(UpperCAmelCase ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
lowercase__ : str = 0
for step, batch in enumerate(UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ : List[str] = model(**UpperCAmelCase )
lowercase__ : List[str] = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
lowercase__ : List[str] = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ : List[str] = model(**UpperCAmelCase )
lowercase__ : Optional[int] = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ : Optional[int] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=UpperCAmelCase , references=UpperCAmelCase , )
lowercase__ : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , UpperCAmelCase )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(UpperCAmelCase ),
'''epoch''': epoch,
} , step=UpperCAmelCase , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def __UpperCamelCase ( ):
lowercase__ : Any = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=UpperCAmelCase , default=UpperCAmelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
        '''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
        '''--project_dir''' , type=UpperCAmelCase , default='''logs''' , help='''Location of where to store experiment tracking logs and relevant project information''' , )
lowercase__ : str = parser.parse_args()
lowercase__ : Tuple = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(UpperCAmelCase , UpperCAmelCase )
if __name__ == "__main__":
main()
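# The tracking calls used above, reduced to their core sequence (project name
# and logged values are illustrative):
from accelerate import Accelerator

accelerator = Accelerator(log_with="all", project_dir="logs")
accelerator.init_trackers("mrpc_tracking_demo", config={"lr": 2e-5, "batch_size": 16})
for step in range(3):
    accelerator.log({"train_loss": 1.0 / (step + 1)}, step=step)
accelerator.end_training()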
| 198 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
_snake_case : Optional[Any] = "Create a default config file for Accelerate with only a few flags set."
def lowerCAmelCase_ ( __lowerCamelCase="no" , __lowerCamelCase = default_json_config_file , __lowerCamelCase = False ):
__snake_case : int = Path(__lowerCamelCase )
path.parent.mkdir(parents=__lowerCamelCase , exist_ok=__lowerCamelCase )
if path.exists():
print(
F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
return False
__snake_case : Any = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
__snake_case : Optional[int] = {
"compute_environment": "LOCAL_MACHINE",
"mixed_precision": mixed_precision,
}
if torch.cuda.is_available():
__snake_case : Dict = torch.cuda.device_count()
__snake_case : Tuple = num_gpus
__snake_case : List[str] = False
if num_gpus > 1:
__snake_case : Optional[int] = "MULTI_GPU"
else:
__snake_case : Dict = "NO"
elif is_xpu_available() and use_xpu:
__snake_case : List[str] = torch.xpu.device_count()
__snake_case : str = num_xpus
__snake_case : int = False
if num_xpus > 1:
__snake_case : Optional[int] = "MULTI_XPU"
else:
__snake_case : str = "NO"
elif is_npu_available():
__snake_case : Any = torch.npu.device_count()
__snake_case : str = num_npus
__snake_case : str = False
if num_npus > 1:
__snake_case : Optional[int] = "MULTI_NPU"
else:
__snake_case : int = "NO"
else:
__snake_case : List[Any] = 0
__snake_case : Dict = True
__snake_case : Tuple = 1
__snake_case : Tuple = "NO"
__snake_case : str = ClusterConfig(**__lowerCamelCase )
config.to_json_file(__lowerCamelCase )
return path
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
__snake_case : Optional[Any] = parser.add_parser("default" , parents=__lowerCamelCase , help=__lowerCamelCase , formatter_class=__lowerCamelCase )
parser.add_argument(
"--config_file" , default=__lowerCamelCase , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , dest="save_location" , )
parser.add_argument(
"--mixed_precision" , choices=["no", "fp16", "bf16"] , type=__lowerCamelCase , help="Whether or not to use mixed precision training. "
"Choose between FP16 and BF16 (bfloat16) training. "
"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later." , default="no" , )
parser.set_defaults(func=__lowerCamelCase )
return parser
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : List[Any] = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F'accelerate configuration saved at {config_file}' )
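# The same helper is exposed publicly as `accelerate.utils.write_basic_config`;
# a short sketch (the save location is illustrative):
from accelerate.utils import write_basic_config

path = write_basic_config(mixed_precision="fp16", save_location="/tmp/accelerate_config.json")
print(path or "config already existed, nothing written")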
| 134 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : Union[str, Any] = logging.get_logger(__name__)
_snake_case : List[str] = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = "encodec"
def __init__( self : Any , lowerCamelCase : Optional[int]=[1.5, 3.0, 6.0, 12.0, 24.0] , lowerCamelCase : List[str]=24000 , lowerCamelCase : int=1 , lowerCamelCase : Optional[int]=False , lowerCamelCase : Dict=None , lowerCamelCase : Tuple=None , lowerCamelCase : Optional[int]=128 , lowerCamelCase : Optional[int]=32 , lowerCamelCase : List[str]=1 , lowerCamelCase : str=[8, 5, 4, 2] , lowerCamelCase : List[str]="weight_norm" , lowerCamelCase : Any=7 , lowerCamelCase : Tuple=7 , lowerCamelCase : int=3 , lowerCamelCase : int=2 , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : List[Any]="reflect" , lowerCamelCase : Union[str, Any]=2 , lowerCamelCase : Optional[int]=2 , lowerCamelCase : int=1.0 , lowerCamelCase : Optional[Any]=1024 , lowerCamelCase : Optional[Any]=None , lowerCamelCase : str=True , **lowerCamelCase : Dict , ) -> Any:
__snake_case : Tuple = target_bandwidths
__snake_case : Union[str, Any] = sampling_rate
__snake_case : Union[str, Any] = audio_channels
__snake_case : Dict = normalize
__snake_case : List[Any] = chunk_length_s
__snake_case : Tuple = overlap
__snake_case : Optional[int] = hidden_size
__snake_case : List[Any] = num_filters
__snake_case : Union[str, Any] = num_residual_layers
__snake_case : Optional[int] = upsampling_ratios
__snake_case : List[str] = norm_type
__snake_case : Optional[int] = kernel_size
__snake_case : Dict = last_kernel_size
__snake_case : Tuple = residual_kernel_size
__snake_case : List[Any] = dilation_growth_rate
__snake_case : Optional[int] = use_causal_conv
__snake_case : Tuple = pad_mode
__snake_case : Union[str, Any] = compress
__snake_case : Union[str, Any] = num_lstm_layers
__snake_case : int = trim_right_ratio
__snake_case : Tuple = codebook_size
__snake_case : Optional[Any] = codebook_dim if codebook_dim is not None else hidden_size
__snake_case : int = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' )
super().__init__(**lowerCamelCase )
@property
def __snake_case ( self : int ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __snake_case ( self : Union[str, Any] ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def __snake_case ( self : Optional[Any] ) -> int:
__snake_case : Union[str, Any] = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def __snake_case ( self : Optional[Any] ) -> int:
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
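# A short sketch of the derived properties (requires `transformers` with
# Encodec support; the values shown follow from the defaults above):
from transformers import EncodecConfig

config = EncodecConfig(sampling_rate=24000, upsampling_ratios=[8, 5, 4, 2])
# hop length is the product of the upsampling ratios: 8 * 5 * 4 * 2 = 320
print(config.frame_rate)  # ceil(24000 / 320) = 75 frames per second
print(config.chunk_length)  # None, since chunk_length_s is unset by default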
| 134 | 1 |
"""simple docstring"""
import torch
from torch import nn
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,_snake_case : Any ,_snake_case : str ,_snake_case : List[Any] ,_snake_case : str ,_snake_case : Optional[Any]=1 ,_snake_case : List[str]=False ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
lowercase__ : Optional[Any] = n_token
lowercase__ : List[str] = d_embed
lowercase__ : int = d_proj
lowercase__ : Union[str, Any] = cutoffs + [n_token]
lowercase__ : Optional[Any] = [0] + self.cutoffs
lowercase__ : Optional[Any] = div_val
lowercase__ : Dict = self.cutoffs[0]
lowercase__ : str = len(self.cutoffs ) - 1
lowercase__ : Optional[Any] = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowercase__ : List[Any] = nn.Parameter(torch.zeros(self.n_clusters ,self.d_embed ) )
lowercase__ : Any = nn.Parameter(torch.zeros(self.n_clusters ) )
lowercase__ : Dict = nn.ModuleList()
lowercase__ : Optional[Any] = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(_snake_case ,_snake_case ) ) )
else:
self.out_projs.append(_snake_case )
self.out_layers.append(nn.Linear(_snake_case ,_snake_case ) )
else:
for i in range(len(self.cutoffs ) ):
lowercase__ , lowercase__ : Tuple = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase__ : List[str] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(_snake_case ,_snake_case ) ) )
self.out_layers.append(nn.Linear(_snake_case ,r_idx - l_idx ) )
lowercase__ : Union[str, Any] = keep_order
def UpperCAmelCase ( self : List[Any] ,_snake_case : int ,_snake_case : Union[str, Any] ,_snake_case : int ,_snake_case : Optional[Any] ) -> Tuple:
"""simple docstring"""
if proj is None:
lowercase__ : List[Any] = nn.functional.linear(_snake_case ,_snake_case ,bias=_snake_case )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowercase__ : List[str] = nn.functional.linear(_snake_case ,proj.t().contiguous() )
lowercase__ : Tuple = nn.functional.linear(_snake_case ,_snake_case ,bias=_snake_case )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def UpperCAmelCase ( self : Dict ,_snake_case : List[str] ,_snake_case : Dict=None ,_snake_case : Dict=False ) -> Optional[int]:
"""simple docstring"""
if labels is not None:
# Shift so that tokens < n predict n
lowercase__ : List[str] = hidden[..., :-1, :].contiguous()
lowercase__ : List[str] = labels[..., 1:].contiguous()
lowercase__ : str = hidden.view(-1 ,hidden.size(-1 ) )
lowercase__ : int = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
lowercase__ : str = hidden.view(-1 ,hidden.size(-1 ) )
if self.n_clusters == 0:
lowercase__ : int = self._compute_logit(_snake_case ,self.out_layers[0].weight ,self.out_layers[0].bias ,self.out_projs[0] )
if labels is not None:
lowercase__ : Dict = labels != -100
lowercase__ : Union[str, Any] = torch.zeros_like(_snake_case ,dtype=hidden.dtype ,device=hidden.device )
lowercase__ : List[str] = (
-nn.functional.log_softmax(_snake_case ,dim=-1 )[mask].gather(1 ,labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowercase__ : str = nn.functional.log_softmax(_snake_case ,dim=-1 )
else:
# construct weights and biases
lowercase__ , lowercase__ : Dict = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowercase__ , lowercase__ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase__ : List[str] = self.out_layers[0].weight[l_idx:r_idx]
lowercase__ : int = self.out_layers[0].bias[l_idx:r_idx]
else:
lowercase__ : Union[str, Any] = self.out_layers[i].weight
lowercase__ : List[str] = self.out_layers[i].bias
if i == 0:
lowercase__ : int = torch.cat([weight_i, self.cluster_weight] ,dim=0 )
lowercase__ : Tuple = torch.cat([bias_i, self.cluster_bias] ,dim=0 )
weights.append(_snake_case )
biases.append(_snake_case )
lowercase__ , lowercase__ , lowercase__ : Optional[Any] = weights[0], biases[0], self.out_projs[0]
lowercase__ : Optional[int] = self._compute_logit(_snake_case ,_snake_case ,_snake_case ,_snake_case )
lowercase__ : Tuple = nn.functional.log_softmax(_snake_case ,dim=1 )
if labels is None:
lowercase__ : Any = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowercase__ : List[Any] = torch.zeros_like(_snake_case ,dtype=hidden.dtype ,device=hidden.device )
lowercase__ : Any = 0
lowercase__ : Optional[int] = [0] + self.cutoffs
for i in range(len(_snake_case ) - 1 ):
lowercase__ , lowercase__ : Any = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowercase__ : Dict = (labels >= l_idx) & (labels < r_idx)
lowercase__ : Optional[int] = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowercase__ : Optional[int] = labels.index_select(0 ,_snake_case ) - l_idx
lowercase__ : Tuple = head_logprob.index_select(0 ,_snake_case )
lowercase__ : List[Any] = hidden.index_select(0 ,_snake_case )
else:
lowercase__ : int = hidden
if i == 0:
if labels is not None:
lowercase__ : str = head_logprob_i.gather(1 ,target_i[:, None] ).squeeze(1 )
else:
lowercase__ : Dict = head_logprob[:, : self.cutoffs[0]]
else:
lowercase__ , lowercase__ , lowercase__ : Optional[Any] = weights[i], biases[i], self.out_projs[i]
lowercase__ : Optional[Any] = self._compute_logit(_snake_case ,_snake_case ,_snake_case ,_snake_case )
lowercase__ : Union[str, Any] = nn.functional.log_softmax(_snake_case ,dim=1 )
lowercase__ : Optional[int] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowercase__ : Dict = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 ,target_i[:, None] ).squeeze(1 )
else:
lowercase__ : List[str] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowercase__ : Optional[Any] = logprob_i
if labels is not None:
if (hasattr(self ,'''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 ,_snake_case ,-logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : List[str] ) -> int:
"""simple docstring"""
if self.n_clusters == 0:
lowercase__ : List[Any] = self._compute_logit(_snake_case ,self.out_layers[0].weight ,self.out_layers[0].bias ,self.out_projs[0] )
return nn.functional.log_softmax(_snake_case ,dim=-1 )
else:
# construct weights and biases
lowercase__ , lowercase__ : Optional[Any] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowercase__ , lowercase__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase__ : List[Any] = self.out_layers[0].weight[l_idx:r_idx]
lowercase__ : List[Any] = self.out_layers[0].bias[l_idx:r_idx]
else:
lowercase__ : Optional[int] = self.out_layers[i].weight
lowercase__ : int = self.out_layers[i].bias
if i == 0:
lowercase__ : str = torch.cat([weight_i, self.cluster_weight] ,dim=0 )
lowercase__ : Dict = torch.cat([bias_i, self.cluster_bias] ,dim=0 )
weights.append(_snake_case )
biases.append(_snake_case )
lowercase__ , lowercase__ , lowercase__ : List[str] = weights[0], biases[0], self.out_projs[0]
lowercase__ : Optional[Any] = self._compute_logit(_snake_case ,_snake_case ,_snake_case ,_snake_case )
lowercase__ : Optional[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowercase__ : List[Any] = nn.functional.log_softmax(_snake_case ,dim=1 )
lowercase__ : Optional[Any] = [0] + self.cutoffs
for i in range(len(_snake_case ) - 1 ):
lowercase__ , lowercase__ : Union[str, Any] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowercase__ : Dict = head_logprob[:, : self.cutoffs[0]]
else:
lowercase__ , lowercase__ , lowercase__ : List[str] = weights[i], biases[i], self.out_projs[i]
lowercase__ : Any = self._compute_logit(_snake_case ,_snake_case ,_snake_case ,_snake_case )
lowercase__ : Any = nn.functional.log_softmax(_snake_case ,dim=1 )
lowercase__ : Any = head_logprob[:, -i] + tail_logprob_i
lowercase__ : str = logprob_i
return out
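# The class above is a projected adaptive log-softmax in the Transformer-XL
# style. As a hedged, minimal illustration of the same cutoff/cluster idea,
# here is PyTorch's built-in equivalent; the sizes below are made up for the
# demo and are not taken from the original code.
import torch
import torch.nn as nn

adaptive = nn.AdaptiveLogSoftmaxWithLoss(in_features=64, n_classes=1000, cutoffs=[100, 500], div_value=4.0)
hidden = torch.randn(8, 64)             # (batch, d_proj)
labels = torch.randint(0, 1000, (8,))   # target token ids
result = adaptive(hidden, labels)
print(result.loss)                      # mean negative log-likelihood, like the loss path above
print(adaptive.log_prob(hidden).shape)  # torch.Size([8, 1000]), like the log_prob path above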
| 16 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : str = """naver-clova-ix/donut-base-finetuned-docvqa"""
UpperCAmelCase : Tuple = (
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
UpperCAmelCase : List[str] = """document_qa"""
UpperCAmelCase : str = AutoProcessor
UpperCAmelCase : Optional[int] = VisionEncoderDecoderModel
UpperCAmelCase : int = ["""image""", """text"""]
UpperCAmelCase : int = ["""text"""]
def __init__( self : Tuple , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Any):
if not is_vision_available():
raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase)
def __snake_case ( self : Tuple , __UpperCAmelCase : "Image" , __UpperCAmelCase : str):
a : Any = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
a : Union[str, Any] = task_prompt.replace("{user_input}" , __UpperCAmelCase)
a : Optional[Any] = self.pre_processor.tokenizer(
__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors="pt").input_ids
a : Any = self.pre_processor(__UpperCAmelCase , return_tensors="pt").pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def __snake_case ( self : int , __UpperCAmelCase : int):
return self.model.generate(
inputs["pixel_values"].to(self.device) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__UpperCAmelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__UpperCAmelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__UpperCAmelCase , ).sequences
def __snake_case ( self : str , __UpperCAmelCase : List[Any]):
a : Union[str, Any] = self.pre_processor.batch_decode(__UpperCAmelCase)[0]
a : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , "")
a : Any = sequence.replace(self.pre_processor.tokenizer.pad_token , "")
a : Optional[Any] = re.sub(r"<.*?>" , "" , __UpperCAmelCase , count=1).strip() # remove first task start token
a : List[str] = self.pre_processor.token2json(__UpperCAmelCase)
return sequence["answer"]
| 40 | 0 |
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowercase__ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
# A mock response for an HTTP head request to emulate server down
_UpperCamelCase : Tuple = mock.Mock()
_UpperCamelCase : List[Any] = 500
_UpperCamelCase : Optional[Any] = {}
_UpperCamelCase : Tuple = HTTPError
_UpperCamelCase : Any = {}
# Download this model to make sure it's in the cache.
_UpperCamelCase : Optional[int] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=lowerCamelCase__ ) as mock_head:
_UpperCamelCase : Dict = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check ensures we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
# A mock response for an HTTP head request to emulate server down
_UpperCamelCase : Dict = mock.Mock()
_UpperCamelCase : Optional[int] = 500
_UpperCamelCase : Any = {}
_UpperCamelCase : Optional[Any] = HTTPError
_UpperCamelCase : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
_UpperCamelCase : Optional[Any] = GPT2TokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=lowerCamelCase__ ) as mock_head:
_UpperCamelCase : List[str] = GPT2TokenizerFast.from_pretrained('gpt2' )
# This check ensures we did call the fake head request
mock_head.assert_called()
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
# This test is for deprecated behavior and can be removed in v5
try:
_UpperCamelCase : str = tempfile.mktemp()
with open(lowerCamelCase__ ,'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ,lowerCamelCase__ )
_UpperCamelCase : Optional[int] = AlbertTokenizer.from_pretrained(lowerCamelCase__ )
finally:
os.remove(lowerCamelCase__ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' ,'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' ,lowerCamelCase__ )
_UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
# This test is for deprecated behavior and can be removed in v5
_UpperCamelCase : Optional[Any] = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class lowercase__ ( unittest.TestCase ):
lowercase__ = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def UpperCamelCase_ ( cls : int ):
'''simple docstring'''
_UpperCamelCase : List[str] = TOKEN
HfFolder.save_token(lowerCamelCase__ )
@classmethod
def UpperCamelCase_ ( cls : Tuple ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCamelCase : List[str] = os.path.join(lowerCamelCase__ ,'vocab.txt' )
with open(lowerCamelCase__ ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_UpperCamelCase : List[Any] = BertTokenizer(lowerCamelCase__ )
tokenizer.push_to_hub('test-tokenizer' ,use_auth_token=self._token )
_UpperCamelCase : List[Any] = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase__ ,repo_id='test-tokenizer' ,push_to_hub=lowerCamelCase__ ,use_auth_token=self._token )
_UpperCamelCase : int = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCamelCase : List[Any] = os.path.join(lowerCamelCase__ ,'vocab.txt' )
with open(lowerCamelCase__ ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_UpperCamelCase : Any = BertTokenizer(lowerCamelCase__ )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' ,use_auth_token=self._token )
_UpperCamelCase : Optional[int] = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
lowerCamelCase__ ,repo_id='valid_org/test-tokenizer-org' ,push_to_hub=lowerCamelCase__ ,use_auth_token=self._token )
_UpperCamelCase : Dict = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCamelCase : List[Any] = os.path.join(lowerCamelCase__ ,'vocab.txt' )
with open(lowerCamelCase__ ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_UpperCamelCase : List[Any] = CustomTokenizer(lowerCamelCase__ )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' ,trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCamelCase : Optional[int] = os.path.join(lowerCamelCase__ ,'vocab.txt' )
with open(lowerCamelCase__ ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_UpperCamelCase : Dict = BertTokenizerFast.from_pretrained(lowerCamelCase__ )
bert_tokenizer.save_pretrained(lowerCamelCase__ )
_UpperCamelCase : Any = CustomTokenizerFast.from_pretrained(lowerCamelCase__ )
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
_UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' ,trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizerFast' )
_UpperCamelCase : Any = AutoTokenizer.from_pretrained(
F'{USER}/test-dynamic-tokenizer' ,use_fast=lowerCamelCase__ ,trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
class lowercase__ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) ,['[CLS]', ' This is a ', 'extra_id_100'] )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) ,['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) ,['BC', 'A'] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : str = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) ,['This is something ', '[SPECIAL_TOKEN]'] )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) ,['AB', 'C'] )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_UpperCamelCase : str = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) ,['ABC', 'D'] )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
# Even if the offsets are wrong, we necessarily output correct string
# parts.
_UpperCamelCase : Optional[Any] = Trie()
_UpperCamelCase : Any = trie.cut_text('ABC' ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(lowerCamelCase__ ,['AB', 'C'] )
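# A hedged usage sketch of the Trie the tests above exercise: tokenizers use
# it to split raw text on added special tokens before normal tokenization.
from transformers.tokenization_utils import Trie

trie = Trie()
trie.add("[CLS]")
trie.add("extra_id_100")
# Longest-match splitting, exactly as asserted in the tests above:
print(trie.split("[CLS] This is a extra_id_100"))
# -> ['[CLS]', ' This is a ', 'extra_id_100']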
| 236 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
snake_case_ : List[str] = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
snake_case_ : Tuple = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
snake_case_ : str = '\nCalculates how good predictions are given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidate should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n timeout: maximum time, in seconds, allowed for each candidate program (Default: 3.0).\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
snake_case_ : str = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
snake_case_ : List[Any] = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) ,homepage='https://github.com/openai/human-eval' ,codebase_urls=['https://github.com/openai/human-eval'] ,reference_urls=['https://github.com/openai/human-eval'] ,license=_LICENSE ,)
def UpperCamelCase_ ( self : str ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[Any]=[1, 10, 100] ,lowerCamelCase__ : int=4 ,lowerCamelCase__ : List[Any]=3.0 ):
'''simple docstring'''
if os.getenv('HF_ALLOW_CODE_EVAL' ,0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError('This metric is currently not supported on Windows.' )
with ThreadPoolExecutor(max_workers=lowerCamelCase__ ) as executor:
_UpperCamelCase : Optional[int] = []
_UpperCamelCase : Optional[int] = Counter()
_UpperCamelCase : int = 0
_UpperCamelCase : Dict = defaultdict(lowerCamelCase__ )
for task_id, (candidates, test_case) in enumerate(zip(lowerCamelCase__ ,lowerCamelCase__ ) ):
for candidate in candidates:
_UpperCamelCase : int = candidate + '\n' + test_case
_UpperCamelCase : Optional[Any] = (test_program, timeout, task_id, completion_id[task_id])
_UpperCamelCase : Dict = executor.submit(lowerCamelCase__ ,*lowerCamelCase__ )
futures.append(lowerCamelCase__ )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(lowerCamelCase__ ):
_UpperCamelCase : Dict = future.result()
results[result["task_id"]].append((result['completion_id'], result) )
_UpperCamelCase , _UpperCamelCase : List[str] = [], []
for result in results.values():
result.sort()
_UpperCamelCase : Optional[Any] = [r[1]['passed'] for r in result]
total.append(len(lowerCamelCase__ ) )
correct.append(sum(lowerCamelCase__ ) )
_UpperCamelCase : List[str] = np.array(lowerCamelCase__ )
_UpperCamelCase : List[Any] = np.array(lowerCamelCase__ )
_UpperCamelCase : Tuple = k
_UpperCamelCase : Tuple = {F'pass@{k}': estimate_pass_at_k(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
def estimator(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : int = itertools.repeat(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
else:
assert len(UpperCAmelCase_ ) == len(UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = iter(UpperCAmelCase_ )
return np.array([estimator(int(UpperCAmelCase_ ) , int(UpperCAmelCase_ ) , UpperCAmelCase_ ) for n, c in zip(UpperCAmelCase_ , UpperCAmelCase_ )] )
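# A hedged, self-contained restatement of the estimator above: pass@k is the
# unbiased estimate 1 - C(n-c, k) / C(n, k) for n samples of which c are
# correct, computed as a stable running product rather than with factorials.
import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

print(pass_at_k(n=5, c=2, k=1))  # 0.4: one draw out of 5 samples, 2 of which pass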
| 236 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase=1_2 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=9_9 , _UpperCamelCase=3_2 , _UpperCamelCase=3_2 , _UpperCamelCase=2 , _UpperCamelCase=4 , _UpperCamelCase=3_7 , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=5_1_2 , _UpperCamelCase=0.02 , _UpperCamelCase=0 , _UpperCamelCase=None , ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = parent
UpperCAmelCase_ : Any = batch_size
UpperCAmelCase_ : Optional[int] = seq_length
UpperCAmelCase_ : Optional[int] = is_training
UpperCAmelCase_ : Dict = use_input_mask
UpperCAmelCase_ : Any = use_labels
UpperCAmelCase_ : str = vocab_size
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : List[Any] = projection_dim
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : int = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = intermediate_size
UpperCAmelCase_ : Any = dropout
UpperCAmelCase_ : int = attention_dropout
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : Dict = initializer_range
UpperCAmelCase_ : Any = scope
UpperCAmelCase_ : Optional[Any] = bos_token_id
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : str = None
if self.use_input_mask:
UpperCAmelCase_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
UpperCAmelCase_ : Dict = input_mask.numpy()
UpperCAmelCase_ : Optional[int] = input_mask.shape
UpperCAmelCase_ : Tuple = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : Optional[int] = 1
UpperCAmelCase_ : Tuple = 0
UpperCAmelCase_ : List[str] = self.get_config()
return config, input_ids, tf.convert_to_tensor(_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Dict:
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int:
UpperCAmelCase_ : str = TFBlipTextModel(config=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = model(_UpperCamelCase , attention_mask=_UpperCamelCase , training=_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = model(_UpperCamelCase , training=_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase_ : List[str] = config_and_inputs
UpperCAmelCase_ : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowerCamelCase (snake_case_ , unittest.TestCase ):
'''simple docstring'''
_snake_case : List[str] = (TFBlipTextModel,) if is_tf_available() else ()
_snake_case : Dict = False
_snake_case : int = False
_snake_case : Any = False
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Optional[int] = BlipTextModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=3_7 )
def __UpperCAmelCase ( self ) -> List[str]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Tuple:
pass
def __UpperCAmelCase ( self ) -> int:
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def __UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def __UpperCAmelCase ( self ) -> str:
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def __UpperCAmelCase ( self ) -> Tuple:
pass
@slow
def __UpperCAmelCase ( self ) -> str:
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Tuple = TFBlipTextModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=True ) -> List[str]:
super().test_pt_tf_model_equivalence(allow_missing_keys=_UpperCamelCase )
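# Hedged sketch of what the tester above builds by hand: a tiny, randomly
# initialized TFBlipTextModel driven with toy ids (config values mirror the
# tester's defaults; nothing here comes from a pretrained checkpoint).
import tensorflow as tf
from transformers import BlipTextConfig, TFBlipTextModel

config = BlipTextConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=37, max_position_embeddings=512,
)
model = TFBlipTextModel(config)
input_ids = tf.constant([[2, 5, 9, 1, 8, 3, 7]])
outputs = model(input_ids, training=False)
print(outputs.last_hidden_state.shape)  # (1, 7, 32)
print(outputs.pooler_output.shape)      # (1, 32)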
| 29 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a_ = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
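# The lazy __init__ above only exposes these classes; a hedged usage sketch of
# the PyTorch variant follows. Note the warm-started model is untrained as a
# seq2seq (the cross-attention is freshly initialized), so the generated text
# is only illustrative.
from transformers import BertTokenizer, EncoderDecoderModel

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", "bert-base-uncased")
model.config.decoder_start_token_id = tokenizer.cls_token_id
model.config.pad_token_id = tokenizer.pad_token_id

inputs = tokenizer("An encoder-decoder built from two BERTs.", return_tensors="pt")
generated = model.generate(inputs.input_ids, max_length=16)
print(tokenizer.decode(generated[0], skip_special_tokens=True))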
| 175 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_lowerCamelCase : Union[str, Any] = None
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : int = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_lowerCamelCase : Tuple = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
},
}
_lowerCamelCase : Any = {
"camembert-base": 5_1_2,
}
_lowerCamelCase : List[str] = "▁"
class __snake_case (_a ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
lowerCAmelCase__ = CamembertTokenizer
def __init__( self : List[str] , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : int="<s>" , _UpperCAmelCase : Optional[Any]="</s>" , _UpperCAmelCase : Tuple="</s>" , _UpperCAmelCase : List[Any]="<s>" , _UpperCAmelCase : int="<unk>" , _UpperCAmelCase : Any="<pad>" , _UpperCAmelCase : List[str]="<mask>" , _UpperCAmelCase : Union[str, Any]=["<s>NOTUSED", "</s>NOTUSED"] , **_UpperCAmelCase : Tuple , ) -> str:
'''simple docstring'''
_lowerCAmelCase : Optional[int] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
_lowerCAmelCase : Optional[Any] = vocab_file
_lowerCAmelCase : List[Any] = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE ( self : Any , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase : str = [self.cls_token_id]
_lowerCAmelCase : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_lowerCAmelCase : List[str] = os.path.join(
_UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ):
copyfile(self.vocab_file , _UpperCAmelCase )
return (out_vocab_file,)
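# Hedged usage sketch for the fast tokenizer defined above, using the public
# camembert-base checkpoint referenced in its pretrained maps.
from transformers import CamembertTokenizerFast

tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
encoded = tokenizer("J'aime le camembert !")
print(encoded.input_ids)                    # <s> ... </s> ids from the BPE vocab
print(tokenizer.decode(encoded.input_ids))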
| 159 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_lowerCamelCase : List[str] = logging.get_logger(__name__)
class __snake_case (_a ):
def __init__( self : Optional[Any] , *_UpperCAmelCase : str , **_UpperCAmelCase : Any ) -> None:
'''simple docstring'''
warnings.warn(
"""The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use GLPNImageProcessor instead.""" , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
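# The shim above only warns and delegates; a hedged sketch of the replacement
# API it points to ("vinvino02/glpn-kitti" is one public GLPN checkpoint):
from transformers import GLPNImageProcessor

image_processor = GLPNImageProcessor.from_pretrained("vinvino02/glpn-kitti")
# image_processor(images=pil_image, return_tensors="pt") then yields pixel_values.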
| 159 | 1 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : str , __lowerCamelCase : str ) -> Dict:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
SCREAMING_SNAKE_CASE__ = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(__lowerCamelCase )
def lowercase_ ( self : Tuple ) -> Tuple:
SCREAMING_SNAKE_CASE__ = '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__lowerCamelCase , multi_process=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmark(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self : Dict ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = '''sgugger/tiny-distilbert-classification'''
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmark(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self : List[str] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmark(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__lowerCamelCase , multi_process=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmark(__lowerCamelCase , [config] )
SCREAMING_SNAKE_CASE__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE__ = '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmark(__lowerCamelCase , [config] )
SCREAMING_SNAKE_CASE__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ = '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmark(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase_ ( self : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmark(__lowerCamelCase , [config] )
SCREAMING_SNAKE_CASE__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase_ ( self : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = '''patrickvonplaten/t5-tiny-random'''
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmark(__lowerCamelCase , configs=[config] )
SCREAMING_SNAKE_CASE__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' )
def lowercase_ ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__lowerCamelCase , multi_process=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmark(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self : List[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(__lowerCamelCase , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(__lowerCamelCase , '''env.csv''' ) , multi_process=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmark(__lowerCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__lowerCamelCase , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(__lowerCamelCase , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(__lowerCamelCase , '''env.csv''' ) ).exists() )
def lowercase_ ( self : List[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(__lowerCamelCase : Optional[Any] ):
self.assertTrue(hasattr(__lowerCamelCase , '''sequential''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''cumulative''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''current''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , '''log.txt''' ) , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , eager_mode=__lowerCamelCase , multi_process=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = TensorFlowBenchmark(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__lowerCamelCase , '''log.txt''' ) ).exists() )
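# Hedged sketch of running the benchmark outside the test harness, mirroring
# the argument combination used repeatedly in the tests above.
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = TensorFlowBenchmark(args).run()
print(results.time_inference_result)
print(results.memory_inference_result)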
| 314 |
from __future__ import annotations


def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """simple docstring"""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]


if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
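# A hedged companion to all_construct above: the same table idea collapses to
# a memoized count when only the number of decompositions is needed.
from functools import lru_cache

def count_construct(target: str, word_bank: list[str]) -> int:
    @lru_cache(maxsize=None)
    def go(suffix: str) -> int:
        if not suffix:
            return 1
        return sum(go(suffix[len(word):]) for word in word_bank if suffix.startswith(word))
    return go(target)

print(count_construct("abcdef", ["ab", "abc", "cd", "def", "abcd", "ef", "c"]))  # 4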
| 314 | 1 |
from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """simple docstring"""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    """simple docstring"""
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    """simple docstring"""
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
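# Hedged cross-check: the recursion above emits 1..n combinations in the same
# lexicographic order as the standard library.
from itertools import combinations

assert generate_all_combinations(4, 2) == [list(c) for c in combinations(range(1, 5), 2)]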
| 44 |
def cocktail_shaker_sort(unsorted: list) -> list:
    """simple docstring"""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
| 44 | 1 |
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def lowerCamelCase__ ( _a):
return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : Optional[int] = create_tensor(UpperCAmelCase_)
SCREAMING_SNAKE_CASE : int = gather(UpperCAmelCase_)
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1))
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : str = [state.process_index]
SCREAMING_SNAKE_CASE : List[str] = gather_object(UpperCAmelCase_)
assert len(UpperCAmelCase_) == state.num_processes, f"{gathered_obj}, {len(UpperCAmelCase_)} != {state.num_processes}"
assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : Union[str, Any] = create_tensor(UpperCAmelCase_)
SCREAMING_SNAKE_CASE : Union[str, Any] = broadcast(UpperCAmelCase_)
assert broadcasted_tensor.shape == torch.Size([state.num_processes])
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1))
def lowerCamelCase__ ( _a):
# We need to pad the tensor with one more element if we are the main process
# to ensure that we can pad
if state.is_main_process:
SCREAMING_SNAKE_CASE : Dict = torch.arange(state.num_processes + 1).to(state.device)
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.arange(state.num_processes).to(state.device)
SCREAMING_SNAKE_CASE : int = pad_across_processes(UpperCAmelCase_)
assert padded_tensor.shape == torch.Size([state.num_processes + 1])
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes)) + [0]
def lowerCamelCase__ ( _a):
# For now runs on only two processes
if state.num_processes != 2:
return
SCREAMING_SNAKE_CASE : Dict = create_tensor(UpperCAmelCase_)
SCREAMING_SNAKE_CASE : Tuple = reduce(UpperCAmelCase_ , "sum")
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([4.0, 6]).to(state.device)
assert torch.allclose(UpperCAmelCase_ , UpperCAmelCase_), f"{reduced_tensor} != {truth_tensor}"
def lowerCamelCase__ ( _a):
# For now runs on only two processes
if state.num_processes != 2:
return
SCREAMING_SNAKE_CASE : Dict = create_tensor(UpperCAmelCase_)
SCREAMING_SNAKE_CASE : Optional[Any] = reduce(UpperCAmelCase_ , "mean")
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([2.0, 3]).to(state.device)
assert torch.allclose(UpperCAmelCase_ , UpperCAmelCase_), f"{reduced_tensor} != {truth_tensor}"
def lowerCamelCase__ ( _a):
# For xla_spawn (TPUs)
main()
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE : Dict = PartialState()
state.print(f"State: {state}")
state.print("testing gather")
test_gather(UpperCAmelCase_)
state.print("testing gather_object")
test_gather_object(UpperCAmelCase_)
state.print("testing broadcast")
test_broadcast(UpperCAmelCase_)
state.print("testing pad_across_processes")
test_pad_across_processes(UpperCAmelCase_)
state.print("testing reduce_sum")
test_reduce_sum(UpperCAmelCase_)
state.print("testing reduce_mean")
test_reduce_mean(UpperCAmelCase_)
if __name__ == "__main__":
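    # Hedged note: to exercise the collective ops above across more than one
    # process, a script like this is typically launched with, for example:
    #   accelerate launch --num_processes 2 this_script.py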
    main()
| 76 |
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
return f'gaussian_noise_s={seed}_shape={"_".join([str(SCREAMING_SNAKE_CASE_ ) for s in shape] )}.npy'
def lowercase_ ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowercase_ ( self , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=(4, 4, 64, 64) , SCREAMING_SNAKE_CASE_=False ) -> str:
__lowerCamelCase : List[str] = jnp.bfloat16 if fpaa else jnp.float32
__lowerCamelCase : Tuple = jnp.array(load_hf_numpy(self.get_file_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) , dtype=SCREAMING_SNAKE_CASE_ )
return image
def lowercase_ ( self , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_="CompVis/stable-diffusion-v1-4" ) -> Dict:
__lowerCamelCase : Union[str, Any] = jnp.bfloat16 if fpaa else jnp.float32
__lowerCamelCase : Optional[Any] = 'bf16' if fpaa else None
__lowerCamelCase , __lowerCamelCase : str = FlaxUNet2DConditionModel.from_pretrained(
SCREAMING_SNAKE_CASE_ , subfolder='unet' , dtype=SCREAMING_SNAKE_CASE_ , revision=SCREAMING_SNAKE_CASE_ )
return model, params
def lowercase_ ( self , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=(4, 77, 7_68) , SCREAMING_SNAKE_CASE_=False ) -> Union[str, Any]:
__lowerCamelCase : Any = jnp.bfloat16 if fpaa else jnp.float32
__lowerCamelCase : Optional[Any] = jnp.array(load_hf_numpy(self.get_file_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) , dtype=SCREAMING_SNAKE_CASE_ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[17, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 10_00, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
__lowerCamelCase , __lowerCamelCase : Tuple = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = self.get_latents(SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = self.get_encoder_hidden_states(SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = model.apply(
{'params': params} , SCREAMING_SNAKE_CASE_ , jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.int32 ) , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , ).sample
assert sample.shape == latents.shape
__lowerCamelCase : Any = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
__lowerCamelCase : Optional[Any] = jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.float32 )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[17, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 10_00, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
__lowerCamelCase , __lowerCamelCase : List[str] = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = self.get_latents(SCREAMING_SNAKE_CASE_ , shape=(4, 4, 96, 96) , fpaa=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = self.get_encoder_hidden_states(SCREAMING_SNAKE_CASE_ , shape=(4, 77, 10_24) , fpaa=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[str] = model.apply(
{'params': params} , SCREAMING_SNAKE_CASE_ , jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.int32 ) , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , ).sample
assert sample.shape == latents.shape
__lowerCamelCase : Optional[int] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__lowerCamelCase : Tuple = jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-2 )
| 185 | 0 |
'''simple docstring'''
MOD_ADLER = 65_521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of a string (as used by zlib, RFC 1950)."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
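# Quick sanity check (a sketch; it relies only on the `adler32` function defined
# above and Python's standard-library zlib): both should agree on the classic
# "Wikipedia" test vector, 0x11E60398 == 300286872.
import zlib

assert adler32("Wikipedia") == zlib.adler32(b"Wikipedia") == 300286872
| 17 |
'''simple docstring'''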
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 17 | 1 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel

from ...models.attention import BasicTransformerBlock
from ...utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
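# Shape sketch (an assumption based on typical CLIP vision defaults, e.g.
# hidden_size=1024 with proj_size=768): pixel_values (B, 3, H, W) -> CLIP
# pooler_output (B, 1024) -> mapper input (B, 1, 1024) -> proj_out (B, 1, 768).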
| 277 |
"""simple docstring"""
def _snake_case ( lowercase__ ):
stooge(lowercase__ , 0 , len(lowercase__ ) - 1 )
return arr
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
_lowerCamelCase, _lowerCamelCase : Optional[Any] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
_lowerCamelCase : Union[str, Any] = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(lowercase__ , lowercase__ , (h - t) )
# Recursively sort last 2/3 elements
stooge(lowercase__ , i + t , (lowercase__) )
# Recursively sort first 2/3 elements
stooge(lowercase__ , lowercase__ , (h - t) )
if __name__ == "__main__":
lowercase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase__ = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted)) | 96 | 0 |
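# Quick check of the sort above (uses only the `stooge_sort` defined here; note
# stooge sort runs in O(n^(log 3 / log 1.5)) ~ O(n^2.71), so keep inputs small):
assert stooge_sort([18, 2, 9, 7, 1]) == [1, 2, 7, 9, 18]
| 96 | 0 |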
'''simple docstring'''
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first n odd composites that are not a prime plus twice a square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """simple docstring"""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
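# Known result for Project Euler 46: the smallest odd composite that cannot be
# written as a prime plus twice a square is 5777, so `solution()` returns 5777.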
| 5 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"""169M""": 1_2,
"""430M""": 2_4,
"""1B5""": 2_4,
"""3B""": 3_2,
"""7B""": 3_2,
"""14B""": 4_0,
}
HIDEN_SIZE_MAPPING = {
"""169M""": 7_6_8,
"""430M""": 1_0_2_4,
"""1B5""": 2_0_4_8,
"""3B""": 2_5_6_0,
"""7B""": 4_0_9_6,
"""14B""": 5_1_2_0,
}
def convert_state_dict(state_dict):
    """simple docstring"""
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith('emb.'):
            name = name.replace('emb.', 'embeddings.')
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('blocks.0.ln0'):
            name = name.replace('blocks.0.ln0', 'blocks.0.pre_ln')
        # att -> attention
        name = re.sub(R'blocks\.(\d+)\.att', R'blocks.\1.attention', name)
        # ffn -> feed_forward
        name = re.sub(R'blocks\.(\d+)\.ffn', R'blocks.\1.feed_forward', name)
        # time_mix_k -> time_mix_key and reshape
        if name.endswith('.time_mix_k'):
            name = name.replace('.time_mix_k', '.time_mix_key')
        # time_mix_v -> time_mix_value and reshape
        if name.endswith('.time_mix_v'):
            name = name.replace('.time_mix_v', '.time_mix_value')
        # time_mix_r -> time_mix_receptance and reshape
        if name.endswith('.time_mix_r'):
            name = name.replace('.time_mix_r', '.time_mix_receptance')

        if name != "head.weight":
            name = 'rwkv.' + name

        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """simple docstring"""
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print('No `--tokenizer_file` provided, we will use the default tokenizer.')
        vocab_size = 50_277
        tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b')
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('Could not infer the size, please provide it with the `--size` argument.')
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location='cpu')
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(index_file, 'w', encoding='utf-8') as f:
            content = json.dumps(index, indent=2, sort_keys=True) + '\n'
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model.")
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError('Please provide a `model_name` to push the model to the Hub.')
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size='2GB')
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
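# Example invocation (a sketch; the repo id and checkpoint file name are
# placeholders, not tested values):
# python convert_rwkv_checkpoint_to_hf.py --repo_id <hub-repo-with-checkpoint> \
#     --checkpoint_file <checkpoint.pth> --output_dir ./rwkv-converted --size 169M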
| 5 | 1 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_A : Tuple = '''\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'''
_A : Optional[Any] = '''\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'''
_A : Tuple = '''\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(self, predictions, references, min_len: int = 1, max_len: int = 4) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 229 |
from ..utils import DummyObject, requires_backends
class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
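# Note: this is the optional-dependency pattern used across the library. The
# import of the class always succeeds; instantiating it (or calling the
# classmethods above) raises an ImportError via `requires_backends` whenever
# torch or scipy is not installed.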
| 299 | 0 |
"""simple docstring"""
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
SCREAMING_SNAKE_CASE : int = get_logger(__name__)
class __lowerCamelCase ( enum.Enum ):
__UpperCamelCase = 'all_checks'
__UpperCamelCase = 'basic_checks'
__UpperCamelCase = 'no_checks'
class __lowerCamelCase ( __lowercase ):
pass
class __lowerCamelCase ( __lowercase ):
pass
class __lowerCamelCase ( __lowercase ):
pass
class __lowerCamelCase ( __lowercase ):
pass
def __UpperCAmelCase ( snake_case_ : Optional[dict] , snake_case_ : dict , snake_case_ : Tuple=None ) -> str:
if expected_checksums is None:
logger.info("""Unable to verify checksums.""" )
return
if len(set(UpperCamelCase__ ) - set(UpperCamelCase__ ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(UpperCamelCase__ ) - set(UpperCamelCase__ ) ) )
if len(set(UpperCamelCase__ ) - set(UpperCamelCase__ ) ) > 0:
raise UnexpectedDownloadedFile(str(set(UpperCamelCase__ ) - set(UpperCamelCase__ ) ) )
_lowerCAmelCase = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
_lowerCAmelCase = ''' for ''' + verification_name if verification_name is not None else ''''''
if len(UpperCamelCase__ ) > 0:
raise NonMatchingChecksumError(
F"""Checksums didn\'t match{for_verification_name}:\n"""
F"""{bad_urls}\n"""
"""Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error""" )
logger.info("""All the checksums matched successfully""" + for_verification_name )
class __lowerCamelCase ( __lowercase ):
pass
class __lowerCamelCase ( __lowercase ):
pass
class __lowerCamelCase ( __lowercase ):
pass
class __lowerCamelCase ( __lowercase ):
pass
def __UpperCAmelCase ( snake_case_ : Optional[dict] , snake_case_ : dict ) -> str:
if expected_splits is None:
logger.info("""Unable to verify splits sizes.""" )
return
if len(set(UpperCamelCase__ ) - set(UpperCamelCase__ ) ) > 0:
raise ExpectedMoreSplits(str(set(UpperCamelCase__ ) - set(UpperCamelCase__ ) ) )
if len(set(UpperCamelCase__ ) - set(UpperCamelCase__ ) ) > 0:
raise UnexpectedSplits(str(set(UpperCamelCase__ ) - set(UpperCamelCase__ ) ) )
_lowerCAmelCase = [
{'''expected''': expected_splits[name], '''recorded''': recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(UpperCamelCase__ ) > 0:
raise NonMatchingSplitsSizesError(str(UpperCamelCase__ ) )
logger.info("""All the splits matched successfully.""" )
def __UpperCAmelCase ( snake_case_ : str , snake_case_ : bool = True ) -> int:
if record_checksum:
_lowerCAmelCase = shaaaa()
with open(UpperCamelCase__ , """rb""" ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , B"""""" ):
m.update(UpperCamelCase__ )
_lowerCAmelCase = m.hexdigest()
else:
_lowerCAmelCase = None
return {"num_bytes": os.path.getsize(UpperCamelCase__ ), "checksum": checksum}
def __UpperCAmelCase ( snake_case_ : List[str] ) -> List[str]:
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False | 352 |
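# Usage sketch (URLs and checksum values below are hypothetical):
# expected = {"https://example.com/a.csv": {"num_bytes": 10, "checksum": "aaa"}}
# recorded = {"https://example.com/a.csv": {"num_bytes": 10, "checksum": "bbb"}}
# verify_checksums(expected, recorded)  # raises NonMatchingChecksumError
| 352 |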
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int = 1000000 ) -> int:
"""simple docstring"""
_lowerCAmelCase = limit + 1
_lowerCAmelCase = [0] * limit
for first_term in range(1 , snake_case_ ):
for n in range(snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase = first_term + n / first_term
if common_difference % 4: # d must be divisble by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z>0 and a>d ,also 4d<a
_lowerCAmelCase = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(F'{solution() = }') | 317 | 0 |
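# Derivation sketch: for the arithmetic progression x = a + d, y = a, z = a - d,
# x^2 - y^2 - z^2 = (a + d)^2 - a^2 - (a - d)^2 = a * (4d - a) = n. So for each
# divisor pair a * m = n we need m = 4d - a, i.e. (a + m) divisible by 4, which
# is exactly the `common_difference % 4` test above.
| 317 | 0 |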
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> dict[str, float]:
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance == 0:
return {"resistance": sqrt(pow(__lowercase , 2 ) - pow(__lowercase , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(__lowercase , 2 ) - pow(__lowercase , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(__lowercase , 2 ) + pow(__lowercase , 2 ) )}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
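# Worked example (the 3-4-5 triangle): a circuit with R = 3 ohms and X = 4 ohms
# has |Z| = sqrt(3^2 + 4^2) = 5 ohms.
# electrical_impedance(3, 4, 0)  # -> {'impedance': 5.0}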
| 247 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
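# Note: with this lazy pattern, `import transformers.models.longformer` stays
# cheap; the torch/TF submodules registered in `_import_structure` are only
# imported when one of their attributes is first accessed on the _LazyModule.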
| 79 | 0 |
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
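# Minimal usage sketch (values shown are just the defaults spelled out):
# config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
# The pruning fields configure the masked linear layers used in movement pruning.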
| 361 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}
    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(R"layers_(\d+)", R"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(R"layers_(\d+)", R"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
_snake_case = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
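# Example invocation (a sketch; the script name and the checkpoint path are
# placeholders):
# python convert_pix2struct_checkpoint_to_pytorch.py \
#     --t5x_checkpoint_path <path/to/t5x/checkpoint> --pytorch_dump_folder_path ./pix2struct-base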
| 201 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 161 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
__A : Tuple = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    """simple docstring"""
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    """simple docstring"""
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    """simple docstring"""
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f'model.framelevel_feature_extractor.module.{i}.kernel.weight'
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias']

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """simple docstring"""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}')

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 154 | 0 |
'''simple docstring'''


def binomial_coefficient(n: int, k: int) -> int:
    """simple docstring"""
    result = 1  # To keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """simple docstring"""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    """simple docstring"""
    if n < 0:
        raise ValueError('factorial() not defined for negative values')
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """simple docstring"""
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input('Enter the number of nodes: ').strip() or 0)
    if node_count <= 0:
        raise ValueError('We need some nodes to work with.')
    print(
        f'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
        f'binary trees and {catalan_number(node_count)} binary search trees.'
    )
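# Quick check: with 3 nodes there are catalan_number(3) = 5 binary search trees
# and binary_tree_count(3) = 5 * 3! = 30 distinct (labeled) binary trees.
assert catalan_number(3) == 5 and binary_tree_count(3) == 30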
| 357 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
snake_case__ : Optional[Any] = logging.get_logger(__name__)
snake_case__ : List[Any] = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.')
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop('labels')
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = None , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = self._prepare_inputs(snake_case_ )
UpperCAmelCase_ : Union[str, Any] = {
'max_length': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
UpperCAmelCase_ : Tuple = self.model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , **snake_case_ , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
UpperCAmelCase_ : Tuple = self._pad_tensors_to_max_len(snake_case_ , gen_kwargs['max_length'] )
UpperCAmelCase_ : List[str] = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self._compute_loss(snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase_ : Optional[Any] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
UpperCAmelCase_ : str = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
UpperCAmelCase_ : List[Any] = self._pad_tensors_to_max_len(snake_case_ , gen_kwargs['max_length'] )
return (loss, logits, labels)
def _UpperCamelCase ( self , snake_case_ , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : Any = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
F''' padded to `max_length`={max_length}''' )
UpperCAmelCase_ : Tuple = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
UpperCAmelCase_ : Dict = tensor
return padded_tensor
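# --- Added sketch (not part of the original trainer): the decay / no-decay
# parameter grouping used in the optimizer setup above, shown standalone with a
# toy model. `_Toy` and the hyperparameter values below are hypothetical
# stand-ins, not values from this file.
if __name__ == "__main__":
    import torch
    from torch.optim import AdamW as _AdamW

    class _Toy(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(4, 4)
            self.LayerNorm = torch.nn.LayerNorm(4)

    _toy = _Toy()
    _no_decay = ["bias", "LayerNorm.weight"]
    _grouped = [
        # everything except biases and LayerNorm weights gets weight decay
        {"params": [p for n, p in _toy.named_parameters() if not any(nd in n for nd in _no_decay)], "weight_decay": 0.01},
        {"params": [p for n, p in _toy.named_parameters() if any(nd in n for nd in _no_decay)], "weight_decay": 0.0},
    ]
    _optimizer = _AdamW(_grouped, lr=3e-5, betas=(0.9, 0.999), eps=1e-8)
    print(_optimizer)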
| 274 | 0 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_snake_case : Tuple = logging.get_logger(__name__)
_snake_case : Dict = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = """gptj"""
a_ = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Any , lowerCAmelCase_ : List[Any]=5_0_4_0_0 , lowerCAmelCase_ : Optional[int]=2_0_4_8 , lowerCAmelCase_ : Optional[int]=4_0_9_6 , lowerCAmelCase_ : Tuple=2_8 , lowerCAmelCase_ : Optional[Any]=1_6 , lowerCAmelCase_ : Tuple=6_4 , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Union[str, Any]="gelu_new" , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Union[str, Any]=1e-5 , lowerCAmelCase_ : List[str]=0.02 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Optional[int]=5_0_2_5_6 , lowerCAmelCase_ : Optional[int]=5_0_2_5_6 , lowerCAmelCase_ : Optional[int]=False , **lowerCAmelCase_ : Dict , ) -> List[str]:
__lowerCAmelCase = vocab_size
__lowerCAmelCase = n_positions
__lowerCAmelCase = n_embd
__lowerCAmelCase = n_layer
__lowerCAmelCase = n_head
__lowerCAmelCase = n_inner
__lowerCAmelCase = rotary_dim
__lowerCAmelCase = activation_function
__lowerCAmelCase = resid_pdrop
__lowerCAmelCase = embd_pdrop
__lowerCAmelCase = attn_pdrop
__lowerCAmelCase = layer_norm_epsilon
__lowerCAmelCase = initializer_range
__lowerCAmelCase = use_cache
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = eos_token_id
super().__init__(
bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , tie_word_embeddings=lowerCAmelCase_ , **lowerCAmelCase_ )
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase_ : PretrainedConfig , lowerCAmelCase_ : str = "default" , lowerCAmelCase_ : List[PatchingSpec] = None , lowerCAmelCase_ : bool = False , ) -> Any:
super().__init__(lowerCAmelCase_ , task=lowerCAmelCase_ , patching_specs=lowerCAmelCase_ , use_past=lowerCAmelCase_ )
if not getattr(self._config , 'pad_token_id' , lowerCAmelCase_ ):
# TODO: how to do that better?
__lowerCAmelCase = 0
@property
def lowercase ( self : int ) -> Mapping[str, Mapping[int, str]]:
__lowerCAmelCase = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase_ , direction='inputs' )
__lowerCAmelCase = {0: 'batch', 1: 'past_sequence + sequence'}
else:
__lowerCAmelCase = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def lowercase ( self : Union[str, Any] ) -> int:
return self._config.n_layer
@property
def lowercase ( self : List[str] ) -> int:
return self._config.n_head
def lowercase ( self : Dict , lowerCAmelCase_ : PreTrainedTokenizer , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : int = -1 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
__lowerCAmelCase = super(lowerCAmelCase_ , self ).generate_dummy_inputs(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ )
        # We need to order the inputs in the way they appear in forward()
__lowerCAmelCase = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowerCAmelCase , __lowerCAmelCase = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__lowerCAmelCase = seqlen + 2
__lowerCAmelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__lowerCAmelCase = [
(torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) for _ in range(self.num_layers )
]
__lowerCAmelCase = common_inputs['attention_mask']
if self.use_past:
__lowerCAmelCase = ordered_inputs['attention_mask'].dtype
__lowerCAmelCase = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ , dtype=lowerCAmelCase_ )] , dim=1 )
return ordered_inputs
@property
def lowercase ( self : Tuple ) -> int:
return 1_3
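# --- Added sketch (not part of the original file). Assuming the two classes
# above are exported under their usual names GPTJConfig and GPTJOnnxConfig,
# producing dummy ONNX-export inputs would look roughly like the following
# (kept as comments because the class names in this file are placeholders):
#
#     from transformers import AutoTokenizer
#     config = GPTJConfig(n_positions=128, n_embd=32, n_layer=2, n_head=2)
#     onnx_config = GPTJOnnxConfig(config, task="default")
#     tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
#     dummy = onnx_config.generate_dummy_inputs(
#         tokenizer, batch_size=1, seq_length=8, framework=TensorType.PYTORCH
#     )
#     # `dummy` is an OrderedDict: "input_ids" first, then the past key/value
#     # tensors when use_past=True, then "attention_mask", matching the
#     # forward() argument order the comment above refers to.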
| 284 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def a_ ( lowerCAmelCase_ : List[Any], lowerCAmelCase_ : str, lowerCAmelCase_ : Optional[int]=None, lowerCAmelCase_ : List[Any]=None ):
if attention_mask is None:
__lowerCAmelCase = tf.cast(tf.math.not_equal(lowerCAmelCase_, config.pad_token_id ), tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class _UpperCAmelCase :
"""simple docstring"""
a_ = OPTConfig
a_ = {}
a_ = """gelu"""
def __init__( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str]=1_3 , lowerCAmelCase_ : Tuple=7 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Any=9_9 , lowerCAmelCase_ : Any=1_6 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Dict=4 , lowerCAmelCase_ : str=4 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : Tuple=2_0 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Any=1 , lowerCAmelCase_ : List[Any]=0 , lowerCAmelCase_ : Optional[int]=1_6 , lowerCAmelCase_ : Dict=1_6 , ) -> int:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = eos_token_id
__lowerCAmelCase = pad_token_id
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = embed_dim
__lowerCAmelCase = word_embed_proj_dim
__lowerCAmelCase = False
def lowercase ( self : List[str] ) -> Optional[Any]:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__lowerCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__lowerCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
__lowerCAmelCase = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowerCAmelCase_ , **self.config_updates , )
__lowerCAmelCase = prepare_opt_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ )
return config, inputs_dict
def lowercase ( self : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict ) -> List[str]:
__lowerCAmelCase = TFOPTModel(config=lowerCAmelCase_ )
__lowerCAmelCase = inputs_dict['input_ids']
__lowerCAmelCase = input_ids[:1, :]
__lowerCAmelCase = inputs_dict['attention_mask'][:1, :]
__lowerCAmelCase = 1
# first forward pass
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
__lowerCAmelCase , __lowerCAmelCase = outputs.to_tuple()
        # create hypothetical next tokens and extend next_input_ids with them
__lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention mask
__lowerCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
__lowerCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__lowerCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
__lowerCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase_ , lowerCAmelCase_ , rtol=1e-3 )
@require_tf
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
a_ = (TFOPTForCausalLM,) if is_tf_available() else ()
a_ = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
a_ = False
a_ = False
a_ = False
a_ = 10
def lowercase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase = TFOPTModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ )
def lowercase ( self : Tuple ) -> Tuple:
self.config_tester.run_common_tests()
def lowercase ( self : Tuple ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase_ )
def lowercase ( self : Union[str, Any] ) -> Dict:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] ):
if hasattr(lowerCAmelCase_ , 'weight' ):
return embedding_layer.weight
else:
                # Build the word embedding weights if they don't exist yet,
                # then retry fetching the attribute once they are built.
model.build()
if hasattr(lowerCAmelCase_ , 'weight' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
__lowerCAmelCase = model_class(config=lowerCAmelCase_ )
__lowerCAmelCase = _get_word_embedding_weight(lowerCAmelCase_ , model.get_input_embeddings() )
__lowerCAmelCase = _get_word_embedding_weight(lowerCAmelCase_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowerCAmelCase_ )
__lowerCAmelCase = _get_word_embedding_weight(lowerCAmelCase_ , model.get_input_embeddings() )
__lowerCAmelCase = _get_word_embedding_weight(lowerCAmelCase_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
__lowerCAmelCase = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowerCAmelCase_ )
# check that weights remain the same after resizing
__lowerCAmelCase = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowerCAmelCase = False
self.assertTrue(lowerCAmelCase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowerCAmelCase_ )
__lowerCAmelCase = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowerCAmelCase = False
self.assertTrue(lowerCAmelCase_ )
def a_ ( lowerCAmelCase_ : Union[str, Any] ):
return tf.constant(lowerCAmelCase_, dtype=tf.intaa )
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
a_ = 99
def lowercase ( self : Optional[int] ) -> Any:
__lowerCAmelCase = tf.ones((4, 1) , dtype=tf.intaa ) * 2
__lowerCAmelCase = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
__lowerCAmelCase = input_ids.shape[0]
__lowerCAmelCase = OPTConfig(
vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase ( self : str ) -> List[str]:
__lowerCAmelCase = TFOPTModel.from_pretrained('facebook/opt-350m' )
__lowerCAmelCase = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
__lowerCAmelCase = tf.not_equal(lowerCAmelCase_ , model.config.pad_token_id )
with tf.GradientTape():
__lowerCAmelCase = model(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ).last_hidden_state
__lowerCAmelCase = (1, 1_1, 5_1_2)
self.assertEqual(output.shape , lowerCAmelCase_ )
__lowerCAmelCase = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=4e-3 ) )
__lowerCAmelCase = tf.function(lowerCAmelCase_ , jit_compile=lowerCAmelCase_ )
__lowerCAmelCase = xla_generate(lowerCAmelCase_ , lowerCAmelCase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=4e-2 ) )
@require_tf
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : int ) -> Dict:
super().setUp()
__lowerCAmelCase = 'facebook/opt-350m'
def lowercase ( self : Dict ) -> Any:
__lowerCAmelCase = TFOPTForCausalLM.from_pretrained(self.path_model )
__lowerCAmelCase = GPTaTokenizer.from_pretrained(self.path_model )
__lowerCAmelCase = [
'Today is a beautiful day and I want to',
'In the city of',
'Paris is the capital of France and',
'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
__lowerCAmelCase = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
__lowerCAmelCase = tf.function(lowerCAmelCase_ , jit_compile=lowerCAmelCase_ )
__lowerCAmelCase = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
@require_tf
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def lowercase ( self : Optional[int] ) -> int:
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def lowercase ( self : int ) -> str:
__lowerCAmelCase = 'facebook/opt-125m'
__lowerCAmelCase = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
__lowerCAmelCase = []
__lowerCAmelCase = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
for prompt in self.prompts:
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' ).input_ids
__lowerCAmelCase = model.generate(lowerCAmelCase_ , max_length=1_0 )
__lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Optional[Any] ) -> str:
__lowerCAmelCase = 'facebook/opt-350m'
__lowerCAmelCase = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = 'left'
# use different length sentences to test batching
__lowerCAmelCase = [
'Hello, my dog is a little',
'Today, I',
]
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' , padding=lowerCAmelCase_ )
__lowerCAmelCase = inputs['input_ids']
__lowerCAmelCase = model.generate(input_ids=lowerCAmelCase_ , attention_mask=inputs['attention_mask'] )
__lowerCAmelCase = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
__lowerCAmelCase = model.generate(input_ids=lowerCAmelCase_ )
__lowerCAmelCase = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
__lowerCAmelCase = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
__lowerCAmelCase = model.generate(input_ids=lowerCAmelCase_ , max_length=model.config.max_length - num_paddings )
__lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , [non_padded_sentence, padded_sentence] )
def lowercase ( self : List[Any] ) -> List[Any]:
__lowerCAmelCase = 'facebook/opt-350m'
__lowerCAmelCase = [
'Today is a beautiful day and I want to',
'In the city of San Francisco, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
__lowerCAmelCase = []
__lowerCAmelCase = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
for prompt in self.prompts:
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' ).input_ids
__lowerCAmelCase = model.generate(lowerCAmelCase_ , max_length=1_0 )
__lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
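# --- Added sketch (not part of the original test file): the same greedy
# generation the tests above verify, outside the unittest harness. It downloads
# the real checkpoint, so it is left under a main guard.
if __name__ == "__main__":
    _tok = GPTaTokenizer.from_pretrained("facebook/opt-125m")
    _model = TFOPTForCausalLM.from_pretrained("facebook/opt-125m")
    _ids = _tok("Today is a beautiful day and I want", return_tensors="tf").input_ids
    print(_tok.batch_decode(_model.generate(_ids, max_length=1_0), skip_special_tokens=True))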
| 284 | 1 |
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__snake_case = logging.get_logger(__name__)
def rename_key( key : str ):
    """Rewrite PyTorch's "name.<index>" submodule keys into Flax's "name_<index>" form."""
    pattern = R'''\w+[.]\d+'''
    pats = re.findall(pattern, key )
    for pat in pats:
        key = key.replace(pat, '''_'''.join(pat.split('''.''' ) ) )
    return key
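# Added note (not in the original file): the regex rewrite in `rename_key`
# turns PyTorch's "name.<index>" submodule keys into Flax's "name_<index>"
# form, e.g. "down_blocks.1.attentions.0.norm.weight" becomes
# "down_blocks_1.attentions_0.norm.weight"; segments without a numeric suffix
# (like "norm.weight") are left untouched.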
def rename_key_and_reshape_tensor( pt_tuple_key, pt_tensor, random_flax_state_dict ):
    """Rename a PyTorch weight key to its Flax equivalent and transpose/reshape the tensor where needed."""
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
    if (
        any('''norm''' in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''embedding''',)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''weight''',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''bias''',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax( pt_state_dict, flax_model, init_key=42 ):
    """Convert a PyTorch state dict into a nested Flax parameter dict for `flax_model`."""
    # Step 1: Convert the PyTorch tensors to numpy arrays
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key ) )
    random_flax_state_dict = flatten_dict(random_flax_params )
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key )
        pt_tuple_key = tuple(renamed_pt_key.split('''.''' ) )
        # Correctly rename weight parameters
        flax_key , flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict )
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                    f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
| 153 |
"""simple docstring"""
def lucas_lehmer_test( p : int ) -> bool:
    """Lucas-Lehmer primality test: for a prime exponent p >= 2, return True iff 2**p - 1 is prime."""
    if p < 2:
        raise ValueError('''p should not be less than 2!''' )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
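# Added note (not in the original file): for a prime exponent p, the test above
# returns True exactly when the Mersenne number 2**p - 1 is prime. Among primes
# below 20 that holds for p in (2, 3, 5, 7, 13, 17, 19) and fails only for
# p = 11, since 2**11 - 1 = 2047 = 23 * 89.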
if __name__ == "__main__":
print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
| 153 | 1 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCamelCase__ :
'''simple docstring'''
@staticmethod
def _lowercase ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
pass
def A ( _SCREAMING_SNAKE_CASE ) -> Dict:
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
SCREAMING_SNAKE_CASE__ : Dict = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> str:
lowerCamelCase : str = pipeline(
"document-question-answering" , model=UpperCamelCase__ , tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase : Optional[Any] = INVOICE_URL
lowerCamelCase : Optional[int] = list(zip(*apply_tesseract(load_image(UpperCamelCase__ ) , UpperCamelCase__ , "" ) ) )
lowerCamelCase : Optional[Any] = "What is the placebo?"
lowerCamelCase : int = [
{
"image": load_image(UpperCamelCase__ ),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
lowerCamelCase : Dict = dqa_pipeline(UpperCamelCase__ , top_k=2 )
self.assertEqual(
UpperCamelCase__ , [
[
{"score": ANY(UpperCamelCase__ ), "answer": ANY(UpperCamelCase__ ), "start": ANY(UpperCamelCase__ ), "end": ANY(UpperCamelCase__ )},
{"score": ANY(UpperCamelCase__ ), "answer": ANY(UpperCamelCase__ ), "start": ANY(UpperCamelCase__ ), "end": ANY(UpperCamelCase__ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : Any = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" )
lowerCamelCase : Union[str, Any] = INVOICE_URL
lowerCamelCase : Tuple = "How many cats are there?"
lowerCamelCase : Dict = [
{"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
{"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
]
lowerCamelCase : List[Any] = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(nested_simplify(UpperCamelCase__ , decimals=4 ) , UpperCamelCase__ )
lowerCamelCase : int = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(nested_simplify(UpperCamelCase__ , decimals=4 ) , UpperCamelCase__ )
        # No text is detected in this image, so layoutlmv2 should fail
        # and the pipeline should return an empty answer.
lowerCamelCase : str = "./tests/fixtures/tests_samples/COCO/000000039769.png"
lowerCamelCase : List[Any] = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(UpperCamelCase__ , [] )
        # We can optionally pass the words and bounding boxes directly
lowerCamelCase : Dict = "./tests/fixtures/tests_samples/COCO/000000039769.png"
lowerCamelCase : Optional[int] = []
lowerCamelCase : List[Any] = []
lowerCamelCase : Optional[Any] = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , words=UpperCamelCase__ , boxes=UpperCamelCase__ , top_k=2 )
self.assertEqual(UpperCamelCase__ , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _lowercase ( self ) -> Any:
lowerCamelCase : Tuple = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
lowerCamelCase : List[str] = INVOICE_URL
lowerCamelCase : str = "What is the invoice number?"
lowerCamelCase : int = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase : Tuple = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase : Tuple = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _lowercase ( self ) -> Any:
lowerCamelCase : Optional[int] = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
lowerCamelCase : Dict = INVOICE_URL
lowerCamelCase : Optional[Any] = "What is the invoice number?"
lowerCamelCase : Tuple = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase : Optional[Any] = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase : int = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : str = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=UpperCamelCase__ )
lowerCamelCase : Dict = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=UpperCamelCase__ , revision="3dc6de3" , )
lowerCamelCase : Dict = INVOICE_URL
lowerCamelCase : Any = "What is the invoice number?"
lowerCamelCase : Union[str, Any] = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
] , )
lowerCamelCase : Dict = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
] , )
lowerCamelCase : Optional[int] = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
lowerCamelCase : int = list(zip(*apply_tesseract(load_image(UpperCamelCase__ ) , UpperCamelCase__ , "" ) ) )
# This model should also work if `image` is set to None
lowerCamelCase : List[str] = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _lowercase ( self ) -> List[Any]:
lowerCamelCase : Any = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=UpperCamelCase__ )
lowerCamelCase : Dict = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=UpperCamelCase__ , revision="3dc6de3" , max_seq_len=50 , )
lowerCamelCase : List[str] = INVOICE_URL
lowerCamelCase : Any = "What is the invoice number?"
lowerCamelCase : str = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase : Dict = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
lowerCamelCase : Tuple = list(zip(*apply_tesseract(load_image(UpperCamelCase__ ) , UpperCamelCase__ , "" ) ) )
# This model should also work if `image` is set to None
lowerCamelCase : Optional[Any] = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
def _lowercase ( self ) -> str:
lowerCamelCase : Optional[int] = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
lowerCamelCase : str = INVOICE_URL
lowerCamelCase : Any = "What is the invoice number?"
lowerCamelCase : Tuple = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(nested_simplify(UpperCamelCase__ , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def _lowercase ( self ) -> Tuple:
pass
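# --- Added sketch (not part of the original test file): minimal standalone use
# of the pipeline exercised above. It downloads the real checkpoint and needs
# pytesseract, so it is left under a main guard; `add_prefix_space=True` is an
# assumption mirroring what the slow test appears to pass.
if __name__ == "__main__":
    _tok = AutoTokenizer.from_pretrained("impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True)
    _dqa = pipeline(
        "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=_tok, revision="3dc6de3"
    )
    print(_dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=2))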
| 48 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
__SCREAMING_SNAKE_CASE = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ):
A__ = hf_hub_download(
repo_id='''nateraw/video-demo''',filename='''archery.mp4''',repo_type='''dataset''' )
A__ = VideoClassificationPipeline(model=__lowerCamelCase,image_processor=__lowerCamelCase,top_k=2 )
A__ = [
example_video_filepath,
'''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
]
return video_classifier, examples
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ):
for example in examples:
A__ = video_classifier(__lowerCamelCase )
self.assertEqual(
__lowerCamelCase,[
{'''score''': ANY(__lowerCamelCase ), '''label''': ANY(__lowerCamelCase )},
{'''score''': ANY(__lowerCamelCase ), '''label''': ANY(__lowerCamelCase )},
],)
@require_torch
def UpperCamelCase ( self ):
A__ = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
A__ = VideoMAEFeatureExtractor(
size={'''shortest_edge''': 10},crop_size={'''height''': 10, '''width''': 10} )
A__ = pipeline(
'''video-classification''',model=__lowerCamelCase,feature_extractor=__lowerCamelCase,frame_sampling_rate=4 )
A__ = hf_hub_download(repo_id='''nateraw/video-demo''',filename='''archery.mp4''',repo_type='''dataset''' )
A__ = video_classifier(__lowerCamelCase,top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase,decimals=4 ),[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],)
A__ = video_classifier(
[
video_file_path,
video_file_path,
],top_k=2,)
self.assertEqual(
nested_simplify(__lowerCamelCase,decimals=4 ),[
[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
],)
@require_tf
def UpperCamelCase ( self ):
pass
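# --- Added sketch (not part of the original test file): the tiny-model path
# from the torch test above, runnable standalone (it downloads the tiny
# checkpoint and a short demo clip).
if __name__ == "__main__":
    _clip = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
    _classifier = pipeline(
        "video-classification", model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
    )
    print(_classifier(_clip, top_k=2))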
| 193 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=99 , lowerCamelCase__=13 , lowerCamelCase__=7 , lowerCamelCase__=9 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=32 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__=8 , lowerCamelCase__=0.1 , lowerCamelCase__=0.0_02 , lowerCamelCase__=1 , lowerCamelCase__=0 , lowerCamelCase__=0 , lowerCamelCase__=None , lowerCamelCase__=None , ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = encoder_seq_length
__lowerCamelCase = decoder_seq_length
# For common tests
__lowerCamelCase = self.decoder_seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_attention_mask
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = d_ff
__lowerCamelCase = relative_attention_num_buckets
__lowerCamelCase = dropout_rate
__lowerCamelCase = initializer_factor
__lowerCamelCase = eos_token_id
__lowerCamelCase = pad_token_id
__lowerCamelCase = decoder_start_token_id
__lowerCamelCase = None
__lowerCamelCase = decoder_layers
def lowercase_ ( self ) -> str:
'''simple docstring'''
return TaConfig.from_pretrained('google/umt5-base' )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , ) -> Dict:
'''simple docstring'''
if attention_mask is None:
__lowerCamelCase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__lowerCamelCase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__lowerCamelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowerCamelCase__ )
if decoder_head_mask is None:
__lowerCamelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase__ )
if cross_attn_head_mask is None:
__lowerCamelCase = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad tokens in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and the rest are between 2..seq_length,
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing whether the past input ids had
        # pad tokens in them, which results in an incorrect seq_length and in turn in
        # position_ids being off by num_pad_tokens in the past input
__lowerCamelCase = input_ids.clamp(self.pad_token_id + 1 )
__lowerCamelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
__lowerCamelCase = self.get_config()
__lowerCamelCase = config.num_attention_heads
__lowerCamelCase = self.prepare_inputs_dict(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return config, input_dict
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = UMTaModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(
input_ids=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ , decoder_attention_mask=lowerCamelCase__ , )
__lowerCamelCase = model(input_ids=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ )
__lowerCamelCase = result.last_hidden_state
__lowerCamelCase = result.past_key_values
__lowerCamelCase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(lowerCamelCase__ ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = UMTaModel(config=lowerCamelCase__ ).get_decoder().to(lowerCamelCase__ ).eval()
# first forward pass
__lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) + 1 )
__lowerCamelCase , __lowerCamelCase = outputs.to_tuple()
        # create a hypothetical next token and extend next_input_ids with it
__lowerCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append to next input_ids
__lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCamelCase = model(lowerCamelCase__ )['last_hidden_state']
__lowerCamelCase = model(lowerCamelCase__ , past_key_values=lowerCamelCase__ )['last_hidden_state']
# select random slice
__lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCamelCase = output_from_no_past[:, -1, random_slice_idx].detach()
__lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = UMTaModel(config=lowerCamelCase__ ).to(lowerCamelCase__ ).half().eval()
__lowerCamelCase = model(**lowerCamelCase__ )['last_hidden_state']
self.parent.assertFalse(torch.isnan(lowerCamelCase__ ).any().item() )
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
snake_case_ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
snake_case_ = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = True
snake_case_ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
snake_case_ = [0.8, 0.9]
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = UMTaModel(config_and_inputs[0] ).to(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
lowerCamelCase__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=lowerCamelCase__ , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*lowerCamelCase__ )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = config_and_inputs[0]
__lowerCamelCase = UMTaForConditionalGeneration(lowerCamelCase__ ).eval()
model.to(lowerCamelCase__ )
__lowerCamelCase = {
'head_mask': torch.zeros(config.num_layers , config.num_heads , device=lowerCamelCase__ ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase__ ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase__ ),
}
for attn_name, (name, mask) in zip(lowerCamelCase__ , head_masking.items() ):
__lowerCamelCase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__lowerCamelCase = torch.ones(
config.num_decoder_layers , config.num_heads , device=lowerCamelCase__ )
__lowerCamelCase = model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=lowerCamelCase__ , return_dict_in_generate=lowerCamelCase__ , **lowerCamelCase__ , )
# We check the state of decoder_attentions and cross_attentions just from the last step
__lowerCamelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def lowercase_ ( self ) -> str:
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=lowerCamelCase__ , legacy=lowerCamelCase__ )
__lowerCamelCase = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
__lowerCamelCase = tokenizer(lowerCamelCase__ , return_tensors='pt' , padding=lowerCamelCase__ ).input_ids
# fmt: off
__lowerCamelCase = torch.tensor(
[
                [ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 826, 321, 671, 25_922, 256_299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333, 61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = model.generate(input_ids.to(lowerCamelCase__ ) )
__lowerCamelCase = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
__lowerCamelCase = tokenizer.batch_decode(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
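# --- Added sketch (not part of the original test file): plain generation with
# the small UMT5 checkpoint used by the integration test above; it downloads
# the model, so it is left under a main guard. The tokenizer flags from the
# test are obfuscated here, so the defaults are used instead.
if __name__ == "__main__":
    _tok = AutoTokenizer.from_pretrained("google/umt5-small")
    _model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small")
    _ids = _tok("A <extra_id_0> walks into a bar.", return_tensors="pt").input_ids
    print(_tok.batch_decode(_model.generate(_ids, max_length=20)))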
| 356 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = 42
class __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
"""simple docstring"""
@register_to_config
def __init__( self , lowerCamelCase__ = 32 , lowerCamelCase__ = 64 , lowerCamelCase__ = 20 , lowerCamelCase__ = 768 , lowerCamelCase__=77 , lowerCamelCase__=4 , lowerCamelCase__ = 0.0 , lowerCamelCase__ = "silu" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "linear" , lowerCamelCase__ = "prd" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , ) -> Tuple:
'''simple docstring'''
super().__init__()
__lowerCamelCase = num_attention_heads
__lowerCamelCase = attention_head_dim
__lowerCamelCase = num_attention_heads * attention_head_dim
__lowerCamelCase = additional_embeddings
__lowerCamelCase = time_embed_dim or inner_dim
__lowerCamelCase = embedding_proj_dim or embedding_dim
__lowerCamelCase = clip_embed_dim or embedding_dim
__lowerCamelCase = Timesteps(lowerCamelCase__ , lowerCamelCase__ , 0 )
__lowerCamelCase = TimestepEmbedding(lowerCamelCase__ , lowerCamelCase__ , out_dim=lowerCamelCase__ , act_fn=lowerCamelCase__ )
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
if embedding_proj_norm_type is None:
__lowerCamelCase = None
elif embedding_proj_norm_type == "layer":
__lowerCamelCase = nn.LayerNorm(lowerCamelCase__ )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
if encoder_hid_proj_type is None:
__lowerCamelCase = None
elif encoder_hid_proj_type == "linear":
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
__lowerCamelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCamelCase__ ) )
if added_emb_type == "prd":
__lowerCamelCase = nn.Parameter(torch.zeros(1 , 1 , lowerCamelCase__ ) )
elif added_emb_type is None:
__lowerCamelCase = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
__lowerCamelCase = nn.ModuleList(
[
BasicTransformerBlock(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , dropout=lowerCamelCase__ , activation_fn='gelu' , attention_bias=lowerCamelCase__ , )
for d in range(lowerCamelCase__ )
] )
if norm_in_type == "layer":
__lowerCamelCase = nn.LayerNorm(lowerCamelCase__ )
elif norm_in_type is None:
__lowerCamelCase = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
__lowerCamelCase = nn.LayerNorm(lowerCamelCase__ )
__lowerCamelCase = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
__lowerCamelCase = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , lowerCamelCase__ , persistent=lowerCamelCase__ )
__lowerCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) )
__lowerCamelCase = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowercase_ ( self ) -> Dict[str, AttentionProcessor]:
'''simple docstring'''
__lowerCamelCase = {}
def fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if hasattr(lowerCamelCase__ , 'set_processor' ):
__lowerCamelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , lowerCamelCase__ , lowerCamelCase__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return processors
def lowercase_ ( self , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = len(self.attn_processors.keys() )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(lowerCamelCase__ )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if hasattr(lowerCamelCase__ , 'set_processor' ):
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
module.set_processor(lowerCamelCase__ )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , lowerCamelCase__ , lowerCamelCase__ )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = True , ) -> int:
'''simple docstring'''
__lowerCamelCase = hidden_states.shape[0]
__lowerCamelCase = timestep
if not torch.is_tensor(lowerCamelCase__ ):
__lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0:
__lowerCamelCase = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__lowerCamelCase = timesteps * torch.ones(lowerCamelCase__ , dtype=timesteps.dtype , device=timesteps.device )
__lowerCamelCase = self.time_proj(lowerCamelCase__ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__lowerCamelCase = timesteps_projected.to(dtype=self.dtype )
__lowerCamelCase = self.time_embedding(lowerCamelCase__ )
if self.embedding_proj_norm is not None:
__lowerCamelCase = self.embedding_proj_norm(lowerCamelCase__ )
__lowerCamelCase = self.embedding_proj(lowerCamelCase__ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__lowerCamelCase = self.encoder_hidden_states_proj(lowerCamelCase__ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
__lowerCamelCase = self.proj_in(lowerCamelCase__ )
__lowerCamelCase = self.positional_embedding.to(hidden_states.dtype )
__lowerCamelCase = []
__lowerCamelCase = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCamelCase__ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__lowerCamelCase = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__lowerCamelCase = hidden_states[:, None, :]
__lowerCamelCase = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__lowerCamelCase = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCamelCase__ , -1 , -1 )
additional_embeds.append(lowerCamelCase__ )
__lowerCamelCase = torch.cat(
lowerCamelCase__ , dim=1 , )
        # Allow `positional_embedding` to not include the additional embeddings and instead pad it with zeros for these additional tokens
__lowerCamelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__lowerCamelCase = F.pad(
lowerCamelCase__ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__lowerCamelCase = hidden_states + positional_embeddings
if attention_mask is not None:
            __lowerCamelCase = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
__lowerCamelCase = F.pad(lowerCamelCase__ , (0, self.additional_embeddings) , value=0.0 )
__lowerCamelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__lowerCamelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__lowerCamelCase = self.norm_in(lowerCamelCase__ )
for block in self.transformer_blocks:
__lowerCamelCase = block(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
__lowerCamelCase = self.norm_out(lowerCamelCase__ )
if self.prd_embedding is not None:
__lowerCamelCase = hidden_states[:, -1]
else:
__lowerCamelCase = hidden_states[:, additional_embeddings_len:]
__lowerCamelCase = self.proj_to_clip_embeddings(lowerCamelCase__ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
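# Standalone sketch (added for illustration) of the causal-mask pattern used in
# `__init__` above: an upper-triangular matrix of large negative values which,
# when added to attention scores, blocks attention to future positions.
def build_causal_attention_mask(seq_len: int) -> torch.Tensor:
    mask = torch.full([seq_len, seq_len], -10000.0)
    mask.triu_(1)  # keep -10000.0 strictly above the diagonal, zeros elsewhere
    return mask[None, ...]  # add a broadcastable batch dimension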
| 348 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE__ = """RegNetConfig"""
# Base docstring
SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE__ = [1, 1088, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE__ = """tabby, tabby cat"""
SCREAMING_SNAKE_CASE__ = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A__ ( nn.Module ):
def __init__( self : str , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : Optional[str] = "relu" , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
        __lowercase = nn.Conv2d(
            _UpperCAmelCase , _UpperCAmelCase , kernel_size=_UpperCAmelCase , stride=_UpperCAmelCase , padding=kernel_size // 2 , groups=_UpperCAmelCase , bias=_UpperCAmelCase , )
        __lowercase = nn.BatchNorm2d(_UpperCAmelCase )
        __lowercase = ACT2FN[activation] if activation is not None else nn.Identity()
def a__ ( self : Tuple , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.convolution(_UpperCAmelCase )
__lowercase = self.normalization(_UpperCAmelCase )
__lowercase = self.activation(_UpperCAmelCase )
return hidden_state
class A__ ( nn.Module ):
def __init__( self : Union[str, Any] , _UpperCAmelCase : RegNetConfig ) -> Any:
"""simple docstring"""
super().__init__()
__lowercase = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
__lowercase = config.num_channels
def a__ ( self : Optional[Any] , _UpperCAmelCase : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
__lowercase = self.embedder(_UpperCAmelCase )
return hidden_state
class A__ ( nn.Module ):
def __init__( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 2 ) -> Optional[int]:
"""simple docstring"""
super().__init__()
        __lowercase = nn.Conv2d(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , stride=_UpperCAmelCase , bias=_UpperCAmelCase )
        __lowercase = nn.BatchNorm2d(_UpperCAmelCase )
def a__ ( self : int , _UpperCAmelCase : Tensor ) -> Tensor:
"""simple docstring"""
__lowercase = self.convolution(_UpperCAmelCase )
__lowercase = self.normalization(_UpperCAmelCase )
return hidden_state
class A__ ( nn.Module ):
def __init__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
"""simple docstring"""
super().__init__()
        __lowercase = nn.AdaptiveAvgPool2d((1, 1) )
        __lowercase = nn.Sequential(
            nn.Conv2d(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ) , nn.ReLU() , nn.Conv2d(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ) , nn.Sigmoid() , )
def a__ ( self : str , _UpperCAmelCase : Dict ) -> str:
"""simple docstring"""
__lowercase = self.pooler(_UpperCAmelCase )
__lowercase = self.attention(_UpperCAmelCase )
__lowercase = hidden_state * attention
return hidden_state
class A__ ( nn.Module ):
def __init__( self : Optional[int] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 1 ) -> Tuple:
"""simple docstring"""
super().__init__()
__lowercase = in_channels != out_channels or stride != 1
__lowercase = max(1 , out_channels // config.groups_width )
__lowercase = (
RegNetShortCut(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity()
)
__lowercase = nn.Sequential(
RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase ) , )
        __lowercase = ACT2FN[config.hidden_act]
def a__ ( self : List[str] , _UpperCAmelCase : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase = hidden_state
__lowercase = self.layer(_UpperCAmelCase )
__lowercase = self.shortcut(_UpperCAmelCase )
hidden_state += residual
__lowercase = self.activation(_UpperCAmelCase )
return hidden_state
class A__ ( nn.Module ):
def __init__( self : Union[str, Any] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 1 ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__lowercase = in_channels != out_channels or stride != 1
__lowercase = max(1 , out_channels // config.groups_width )
__lowercase = (
RegNetShortCut(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity()
)
__lowercase = nn.Sequential(
RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act ) , RegNetSELayer(_UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase ) , )
        __lowercase = ACT2FN[config.hidden_act]
def a__ ( self : Tuple , _UpperCAmelCase : Any ) -> List[str]:
"""simple docstring"""
__lowercase = hidden_state
__lowercase = self.layer(_UpperCAmelCase )
__lowercase = self.shortcut(_UpperCAmelCase )
hidden_state += residual
__lowercase = self.activation(_UpperCAmelCase )
return hidden_state
class A__ ( nn.Module ):
def __init__( self : List[Any] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 2 , _UpperCAmelCase : int = 2 , ) -> Dict:
"""simple docstring"""
super().__init__()
__lowercase = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
__lowercase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , ) , *[layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) for _ in range(depth - 1 )] , )
def a__ ( self : Any , _UpperCAmelCase : str ) -> int:
"""simple docstring"""
__lowercase = self.layers(_UpperCAmelCase )
return hidden_state
class A__ ( nn.Module ):
def __init__( self : Any , _UpperCAmelCase : RegNetConfig ) -> int:
"""simple docstring"""
super().__init__()
__lowercase = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
_UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
__lowercase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_UpperCAmelCase , config.depths[1:] ):
self.stages.append(RegNetStage(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , depth=_UpperCAmelCase ) )
def a__ ( self : int , _UpperCAmelCase : Tensor , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True ) -> BaseModelOutputWithNoAttention:
"""simple docstring"""
__lowercase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__lowercase = hidden_states + (hidden_state,)
__lowercase = stage_module(_UpperCAmelCase )
if output_hidden_states:
__lowercase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_UpperCAmelCase , hidden_states=_UpperCAmelCase )
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[Any] = RegNetConfig
lowerCAmelCase__ : Optional[int] = "regnet"
lowerCAmelCase__ : Dict = "pixel_values"
lowerCAmelCase__ : List[str] = True
def a__ ( self : Any , _UpperCAmelCase : Any ) -> Dict:
"""simple docstring"""
        if isinstance(_UpperCAmelCase , nn.Conv2d ):
            nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
        elif isinstance(_UpperCAmelCase , (nn.BatchNorm2d, nn.GroupNorm) ):
            nn.init.constant_(module.weight , 1 )
            nn.init.constant_(module.bias , 0 )
def a__ ( self : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any]=False ) -> Dict:
"""simple docstring"""
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = value
SCREAMING_SNAKE_CASE__ = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
SCREAMING_SNAKE_CASE__ = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , lowerCAmelCase__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class A__ ( lowerCAmelCase__ ):
def __init__( self : List[Any] , _UpperCAmelCase : Any ) -> str:
"""simple docstring"""
super().__init__(_UpperCAmelCase )
__lowercase = config
__lowercase = RegNetEmbeddings(_UpperCAmelCase )
__lowercase = RegNetEncoder(_UpperCAmelCase )
        __lowercase = nn.AdaptiveAvgPool2d((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a__ ( self : Tuple , _UpperCAmelCase : Tensor , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
__lowercase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowercase = return_dict if return_dict is not None else self.config.use_return_dict
__lowercase = self.embedder(_UpperCAmelCase )
__lowercase = self.encoder(
_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase )
__lowercase = encoder_outputs[0]
__lowercase = self.pooler(_UpperCAmelCase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_UpperCAmelCase , pooler_output=_UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowerCAmelCase__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class A__ ( lowerCAmelCase__ ):
def __init__( self : str , _UpperCAmelCase : List[Any] ) -> Tuple:
"""simple docstring"""
super().__init__(_UpperCAmelCase )
__lowercase = config.num_labels
__lowercase = RegNetModel(_UpperCAmelCase )
# classification head
__lowercase = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a__ ( self : List[Any] , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : Optional[torch.LongTensor] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
"""simple docstring"""
__lowercase = return_dict if return_dict is not None else self.config.use_return_dict
__lowercase = self.regnet(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase )
__lowercase = outputs.pooler_output if return_dict else outputs[1]
__lowercase = self.classifier(_UpperCAmelCase )
__lowercase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__lowercase = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__lowercase = 'single_label_classification'
else:
__lowercase = 'multi_label_classification'
if self.config.problem_type == "regression":
__lowercase = MSELoss()
if self.num_labels == 1:
__lowercase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__lowercase = loss_fct(_UpperCAmelCase , _UpperCAmelCase )
elif self.config.problem_type == "single_label_classification":
__lowercase = CrossEntropyLoss()
__lowercase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__lowercase = BCEWithLogitsLoss()
__lowercase = loss_fct(_UpperCAmelCase , _UpperCAmelCase )
if not return_dict:
__lowercase = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_UpperCAmelCase , logits=_UpperCAmelCase , hidden_states=outputs.hidden_states )
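# Minimal usage sketch (added): run the public RegNet classification checkpoint
# referenced in the docstrings above on a dummy image (downloads weights; the
# processor and model classes are the standard transformers APIs).
if __name__ == "__main__":
    from PIL import Image
    from transformers import AutoImageProcessor, RegNetForImageClassification

    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    image = Image.new("RGB", (224, 224))  # dummy input; use a real photo in practice
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[int(logits.argmax(-1))])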
| 325 |
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: return all prime numbers below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """
    Count hybrid integers p**q * q**p (p, q distinct primes) that do not exceed
    base**degree, comparing in log2 space to avoid enormous intermediate values.
    """
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
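# Cross-check sketch (added): count the same prime pairs by brute force over
# small primes to validate the two-pointer loop above (valid for small bounds).
def brute_force_count(base: int, degree: int) -> int:
    primes = calculate_prime_numbers(100)
    bound = degree * log2(base)
    return sum(
        1
        for i, p in enumerate(primes)
        for q in primes[i + 1 :]
        if q * log2(p) + p * log2(q) <= bound
    )


# For example, brute_force_count(1000, 1) == solution(1000, 1) == 2: the two
# hybrid integers not exceeding 1000 are 2**3 * 3**2 = 72 and 2**5 * 5**2 = 800.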
| 325 | 1 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """Factory function used to instantiate a serving server from command line arguments."""
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose model information."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command with the root argument parser."""
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints.")
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on", )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device", type=int, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)", )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                "Or install FastAPI and uvicorn separately.")
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/", self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=["GET"], ),
                    APIRoute(
                        "/tokenize", self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=["POST"], ),
                    APIRoute(
                        "/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=["POST"], ),
                    APIRoute(
                        "/forward", self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=["POST"], ),
                ], timeout=600, )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """Tokenize the provided input, optionally returning the token ids as well."""
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """Detokenize the provided token ids into readable text."""
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
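# Self-contained mini-sketch (added) of the FastAPI routing pattern used by
# ServeCommand above, independent of any model; all names here are illustrative.
if _serve_dependencies_installed:

    class EchoResult(BaseModel):
        text: str

    async def echo(text: str = Body(None, embed=True)) -> EchoResult:
        return EchoResult(text=text)

    demo_app = FastAPI(routes=[APIRoute("/echo", echo, response_model=EchoResult, methods=["POST"])])
    # Serve it with: run(demo_app, host="localhost", port=8888)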
| 371 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class SCREAMING_SNAKE_CASE_ ( __a , unittest.TestCase ):
"""simple docstring"""
__lowercase : List[str] = CpmAntTokenizer
__lowercase : List[str] = False
def snake_case_ ( self):
super().setUp()
__SCREAMING_SNAKE_CASE = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
@tooslow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""")
__SCREAMING_SNAKE_CASE = """今天天气真好!"""
__SCREAMING_SNAKE_CASE = ["""今天""", """天气""", """真""", """好""", """!"""]
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = """今天天气真好!"""
__SCREAMING_SNAKE_CASE = [tokenizer.bos_token] + tokens
        __SCREAMING_SNAKE_CASE = [6, 9802, 14962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.decode(lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
| 255 | 0 |
from __future__ import annotations

from collections.abc import Callable


class Heap:
    """A generic heap; usable as a min- or max-heap by passing `key` accordingly."""

    def __init__(self, key: Callable | None = None):
        # Stores the actual heap items as [item, score] pairs.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key: Callable = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns the parent index of the given index, if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns the left-child index of the given index, if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns the right-child index of the given index, if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs the changes required for swapping two elements in the heap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares two items using their stored scores."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """
        Returns the index of a valid parent as per the desired ordering among
        the given index and both of its children.
        """
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction from the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction from the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value) -> None:
        """Updates the value of the given item in the heap, if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item) -> None:
        """Deletes the given item from the heap, if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        # Move the last element into the vacated slot and fix its position entry.
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> list | None:
        """Returns the top [item, score] pair from the heap, if present."""
        return self.arr[0] if self.size else None

    def extract_top(self) -> list | None:
        """Returns and removes the top [item, score] pair from the heap, if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def _A ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
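# Usage sketch (added): the Heap above is a min-heap by default; pass
# key=lambda x: -x for max-heap behaviour. Items must be hashable because
# positions are tracked in `pos_map`.
def demo_heap() -> None:
    h = Heap()
    h.insert_item(5, 34)
    h.insert_item(6, 31)
    h.insert_item(7, 37)
    print(h.get_top())      # [6, 31]
    h.update_item(6, 40)
    print(h.extract_top())  # [5, 34]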
| 250 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class a__ :
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=10 , _UpperCamelCase=3 , _UpperCamelCase=2 , _UpperCamelCase=2 , _UpperCamelCase=2 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=32 , _UpperCamelCase=5 , _UpperCamelCase=4 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=10 , _UpperCamelCase=0.0_2 , _UpperCamelCase=0.9 , _UpperCamelCase=None , ):
"""simple docstring"""
_lowercase : List[str] = parent
_lowercase : Tuple = batch_size
_lowercase : Tuple = image_size
_lowercase : Any = num_channels
_lowercase : Tuple = patch_size
_lowercase : Union[str, Any] = tubelet_size
_lowercase : str = num_frames
_lowercase : Any = is_training
_lowercase : Tuple = use_labels
_lowercase : List[Any] = hidden_size
_lowercase : int = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : int = intermediate_size
_lowercase : Optional[int] = hidden_act
_lowercase : Union[str, Any] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : Optional[Any] = type_sequence_label_size
_lowercase : Optional[Any] = initializer_range
_lowercase : int = mask_ratio
_lowercase : Union[str, Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
_lowercase : List[str] = (image_size // patch_size) ** 2
_lowercase : Optional[Any] = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
_lowercase : str = int(mask_ratio * self.seq_length )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
_lowercase : Tuple = None
if self.use_labels:
_lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self ):
"""simple docstring"""
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase : Dict = VideoMAEModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_lowercase : Dict = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase : Dict = VideoMAEForPreTraining(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_lowercase : Optional[int] = torch.ones((self.num_masks,) )
_lowercase : List[Any] = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
_lowercase : Tuple = mask.expand(self.batch_size , -1 ).bool()
_lowercase : Tuple = model(_UpperCamelCase , _UpperCamelCase )
# model only returns predictions for masked patches
_lowercase : Tuple = mask.sum().item()
_lowercase : Optional[Any] = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[Any] = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase : List[Any] = config_and_inputs
_lowercase : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class a__ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : List[str] = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
_SCREAMING_SNAKE_CASE : List[Any] = (
{'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE : Tuple = False
_SCREAMING_SNAKE_CASE : Dict = False
_SCREAMING_SNAKE_CASE : Optional[int] = False
_SCREAMING_SNAKE_CASE : Optional[Any] = False
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : int = VideoMAEModelTester(self )
_lowercase : Union[str, Any] = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ):
"""simple docstring"""
_lowercase : Any = copy.deepcopy(_UpperCamelCase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_lowercase : Union[str, Any] = torch.ones((self.model_tester.num_masks,) )
_lowercase : Dict = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
_lowercase : List[str] = mask.expand(self.model_tester.batch_size , -1 ).bool()
_lowercase : Any = bool_masked_pos.to(_UpperCamelCase )
if return_labels:
if model_class in [
*get_values(_UpperCamelCase ),
]:
_lowercase : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCamelCase )
return inputs_dict
def _lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="VideoMAE does not use inputs_embeds" )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase , _lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : List[str] = model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowercase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase , _lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : List[Any] = model_class(_UpperCamelCase )
_lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : str = [*signature.parameters.keys()]
_lowercase : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCamelCase )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : int = VideoMAEModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
if not self.has_attentions:
pass
else:
_lowercase , _lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : Optional[int] = True
for model_class in self.all_model_classes:
_lowercase : List[str] = self.model_tester.seq_length - self.model_tester.num_masks
_lowercase : List[Any] = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
_lowercase : int = True
_lowercase : str = False
_lowercase : Any = True
_lowercase : Optional[Any] = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_lowercase : Union[str, Any] = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
_lowercase : List[str] = outputs.attentions
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowercase : Tuple = True
_lowercase : Optional[Any] = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_lowercase : Dict = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
_lowercase : Tuple = outputs.attentions
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
_lowercase : str = len(_UpperCamelCase )
# Check attention is always last and order is fine
_lowercase : List[Any] = True
_lowercase : List[str] = True
_lowercase : Any = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_lowercase : Optional[int] = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(out_len + 1 , len(_UpperCamelCase ) )
_lowercase : Optional[int] = outputs.attentions
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _lowerCamelCase ( self ):
"""simple docstring"""
def check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_lowercase : Optional[int] = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_lowercase : Tuple = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
_lowercase : List[str] = outputs.hidden_states
_lowercase : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
_lowercase : List[str] = self.model_tester.seq_length - self.model_tester.num_masks
_lowercase : Optional[Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_lowercase , _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Dict = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowercase : List[str] = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def _A ( ) -> Any:
_lowercase : Tuple = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
_lowercase : int = np.load(snake_case )
return list(snake_case )
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self ):
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Tuple = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics" ).to(
_UpperCamelCase )
_lowercase : Dict = self.default_image_processor
_lowercase : Optional[Any] = prepare_video()
_lowercase : Union[str, Any] = image_processor(_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
_lowercase : str = model(**_UpperCamelCase )
# verify the logits
_lowercase : List[Any] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
        _lowercase : int = torch.tensor([0.3669, -0.0688, -0.2421] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1E-4 ) )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" ).to(_UpperCamelCase )
_lowercase : Dict = self.default_image_processor
_lowercase : Optional[Any] = prepare_video()
_lowercase : List[Any] = image_processor(_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# add boolean mask, indicating which patches to mask
_lowercase : int = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
_lowercase : Any = torch.load(_UpperCamelCase )
# forward pass
with torch.no_grad():
_lowercase : List[Any] = model(**_UpperCamelCase )
# verify the logits
_lowercase : Dict = torch.Size([1, 1408, 1536] )
        _lowercase : Tuple = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=_UpperCamelCase )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _UpperCamelCase , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
        _lowercase : Tuple = torch.tensor([0.5142] , device=_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.loss , _UpperCamelCase , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
_lowercase : Dict = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" , norm_pix_loss=_UpperCamelCase ).to(
_UpperCamelCase )
with torch.no_grad():
_lowercase : Optional[int] = model(**_UpperCamelCase )
        _lowercase : List[str] = torch.tensor([0.6469] , device=_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.loss , _UpperCamelCase , atol=1E-4 ) )
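# Illustration (added) of the boolean-mask construction repeated in the tests
# above: a single shared mask with `num_masks` masked positions, expanded over
# the batch. The sizes below are illustrative only.
def _demo_bool_masked_pos(seq_length: int = 8, num_masks: int = 3, batch_size: int = 2):
    mask = torch.ones((num_masks,))
    mask = torch.cat([mask, torch.zeros(seq_length - mask.size(0))])
    return mask.expand(batch_size, -1).bool()  # shape (batch_size, seq_length)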
| 250 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class _lowercase ( __UpperCAmelCase ):
def _UpperCamelCase ( self , UpperCAmelCase_ ) -> float:
return 0.0
def UpperCAmelCase ( a_, a_ ):
'''simple docstring'''
lowerCamelCase : Optional[int] = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
lowerCamelCase : Union[str, Any] = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def UpperCAmelCase ( a_, a_ ):
'''simple docstring'''
lowerCamelCase : Dict = 512
lowerCamelCase : Optional[Any] = [1] + [0] * (size - 1)
lowerCamelCase : int = [filter_type.process(a_ ) for item in inputs]
lowerCamelCase : int = [0] * (samplerate - size) # zero-padding
outputs += filler
lowerCamelCase : List[Any] = np.abs(np.fft.fft(a_ ) )
lowerCamelCase : Optional[Any] = 20 * np.logaa(a_ )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24, samplerate / 2 - 1 )
plt.xlabel('Frequency (Hz)' )
plt.xscale('log' )
# Display within reasonable bounds
lowerCamelCase : Tuple = get_bounds(a_, a_ )
plt.ylim(max([-80, bounds[0]] ), min([80, bounds[1]] ) )
plt.ylabel('Gain (dB)' )
plt.plot(a_ )
plt.show()
def UpperCAmelCase ( a_, a_ ):
'''simple docstring'''
lowerCamelCase : Dict = 512
lowerCamelCase : Dict = [1] + [0] * (size - 1)
lowerCamelCase : Dict = [filter_type.process(a_ ) for item in inputs]
lowerCamelCase : str = [0] * (samplerate - size) # zero-padding
outputs += filler
lowerCamelCase : Optional[int] = np.angle(np.fft.fft(a_ ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24, samplerate / 2 - 1 )
plt.xlabel('Frequency (Hz)' )
plt.xscale('log' )
plt.ylim(-2 * pi, 2 * pi )
plt.ylabel('Phase shift (Radians)' )
plt.plot(np.unwrap(a_, -2 * pi ) )
plt.show()
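# Usage sketch (added): any object exposing process(sample) -> float satisfies
# FilterType above. An identity "filter" has a flat 0 dB magnitude response.
class IdentityFilter:
    def process(self, sample: float) -> float:
        return sample


if __name__ == "__main__":
    show_frequency_response(IdentityFilter(), 48000)
    show_phase_response(IdentityFilter(), 48000)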
| 205 |
"""simple docstring"""
def UpperCAmelCase ( a_ ):
'''simple docstring'''
lowerCamelCase : List[Any] = 1
for i in range(1, num + 1 ):
fact *= i
return fact
def UpperCAmelCase ( a_ ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = 0
while number > 0:
lowerCamelCase : str = number % 10
sum_of_digits += last_digit
lowerCamelCase : Tuple = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def UpperCAmelCase ( a_ = 100 ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = factorial(a_ )
lowerCamelCase : int = split_and_add(a_ )
return result
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
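# Worked check (added): 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27,
# so solution(10) returns 27.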
| 205 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'bert_for_seq_generation': (
            'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'bert_for_seq_generation': 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    """SentencePiece-backed tokenizer for BertGeneration."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self: str , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any]="<s>" , UpperCamelCase_: str="</s>" , UpperCamelCase_: Union[str, Any]="<unk>" , UpperCamelCase_: Optional[int]="<pad>" , UpperCamelCase_: Optional[Any]="<::::>" , UpperCamelCase_: Optional[Dict[str, Any]] = None , **UpperCamelCase_: int , ) -> None:
"""simple docstring"""
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
lowercase__ = vocab_file
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase_ )
@property
def lowerCamelCase_ ( self: Any ) -> str:
"""simple docstring"""
return self.sp_model.get_piece_size()
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
lowercase__ = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self: Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__( self: Dict , UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Tuple ) -> Dict:
"""simple docstring"""
return self.sp_model.piece_to_id(UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: Tuple ) -> Any:
"""simple docstring"""
lowercase__ = self.sp_model.IdToPiece(UpperCamelCase_ )
return token
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ = []
lowercase__ = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(UpperCamelCase_ ) + token
lowercase__ = []
else:
current_sub_tokens.append(UpperCamelCase_ )
out_string += self.sp_model.decode(UpperCamelCase_ )
return out_string.strip()
def lowerCamelCase_ ( self: str , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase__ = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , '''wb''' ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
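# Usage sketch (added): load the sentencepiece model referenced in
# PRETRAINED_VOCAB_FILES_MAP above (downloads the vocabulary on first use).
if __name__ == "__main__":
    tok = BertGenerationTokenizer.from_pretrained(
        "google/bert_for_seq_generation_L-24_bbc_encoder")
    print(tok.tokenize("Hello world"))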
| 110 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
        _snake_case = AutoModelForSeq2SeqLM.from_pretrained('google/mt5-small' , return_dict=lowerCAmelCase_ ).to(lowerCAmelCase_ )
_snake_case = AutoTokenizer.from_pretrained('google/mt5-small' )
_snake_case = tokenizer('Hello there' , return_tensors='pt' ).input_ids
_snake_case = tokenizer('Hi I am' , return_tensors='pt' ).input_ids
_snake_case = model(input_ids.to(lowerCAmelCase_ ) , labels=labels.to(lowerCAmelCase_ ) ).loss
_snake_case = -(labels.shape[-1] * loss.item())
_snake_case = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
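# Note (added): `loss` above is the mean negative log-likelihood per label
# token, so multiplying by labels.shape[-1] and negating recovers the total
# log-probability of the label sequence, which is what EXPECTED_SCORE encodes.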
| 42 | 0 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
)
if is_vision_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class TFLayoutLMvaModelTester:
    def __init__(self , parent , batch_size=2 , num_channels=3 , image_size=4 , patch_size=2 , text_seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=36 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , coordinate_size=6 , shape_size=6 , num_labels=3 , num_choices=4 , scope=None , range_bbox=1000 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        bbox = bbox.numpy()
        # Ensure that bbox is legal (x0 <= x1 and y0 <= y1); swap the coordinates otherwise
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox )
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask ):
        """simple docstring"""
        model = TFLayoutLMvaModel(config=config )
        # text + image
        result = model(input_ids , pixel_values=pixel_values , training=False )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , training=False , )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        result = model(input_ids , training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        result = model({'''pixel_values''': pixel_values} , training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )

    def create_and_check_for_sequence_classification(self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , training=False , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification(self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , training=False , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )

    def create_and_check_for_question_answering(self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels ):
        """simple docstring"""
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , training=False , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''pixel_values''': pixel_values,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        """simple docstring"""
        return True
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(v , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
                inputs_dict['''labels'''] = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
                inputs_dict['''start_positions'''] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
                inputs_dict['''end_positions'''] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ):
                inputs_dict['''labels'''] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ):
                inputs_dict['''labels'''] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )
        return inputs_dict
    def setUp(self ):
        """simple docstring"""
        self.model_tester = TFLayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )

    def test_config(self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_loss_computation(self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            if getattr(model , '''hf_compute_loss''' , None ):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=True )[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                input_ids = prepared_for_class.pop('''input_ids''' )
                loss = model(input_ids , **prepared_for_class )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                input_ids = prepared_for_class.pop('''input_ids''' )
                if "labels" in prepared_for_class:
                    labels = prepared_for_class['''labels'''].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class['''labels'''] = tf.convert_to_tensor(labels )
                        loss = model(input_ids , **prepared_for_class )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                loss = model(prepared_for_class )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call ).parameters
                signature_names = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: '''input_ids'''}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key )
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input )
                # Send to model
                loss = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def test_model(self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
    def test_model_various_embeddings(self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
    def test_for_sequence_classification(self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
    def test_for_token_classification(self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels )
    def test_for_question_answering(self ):
        """simple docstring"""
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
    @slow
    def test_model_from_pretrained(self ):
        """simple docstring"""
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor(self ):
        """simple docstring"""
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None

    @slow
    def test_inference_no_head(self ):
        """simple docstring"""
        model = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors='''tf''' ).pixel_values
        input_ids = tf.constant([[1, 2]] )
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = tf.constant(
            [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ) )
| 362 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig ):
    model_type = 'sew-d'

    def __init__(self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , squeeze_factor=2 , max_position_embeddings=512 , position_buckets=256 , share_att_key=True , relative_attention=True , pos_att_type=("p2c", "c2p") , norm_rel_ebd="layer_norm" , hidden_act="gelu_python" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , initializer_range=0.0_2 , layer_norm_eps=1E-7 , feature_layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , apply_spec_augment=True , mask_time_prob=0.0_5 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type )
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect.'''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
                f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
                f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
    @property
    def inputs_to_logits_ratio(self ):
        """simple docstring"""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
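# (A sketch of what `inputs_to_logits_ratio` computes, assuming the default strides above:
# functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1) == 320,
# i.e. one encoder frame per 320 raw audio samples.)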
| 238 | 0 |
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6_37_81_37.0
AXIS_B = 6_35_67_52.31_42_45
RADIUS = 6_37_81_37
def haversine_distance(lat1: float , lon1: float , lat2: float , lon2: float ) -> float:
    """simple docstring"""
    # Equation parameters: https://en.wikipedia.org/wiki/Haversine_formula#Formulation
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    phi_2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    lambda_1 = radians(lon1 )
    lambda_2 = radians(lon2 )
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2 )
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2 )
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1 ) * cos(phi_2 ) * sin_sq_lambda) )
    return 2 * RADIUS * asin(h_value )
if __name__ == "__main__":
import doctest
doctest.testmod()
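    # A minimal usage sketch (hypothetical coordinates; the function returns metres):
    # haversine_distance(37.774856, -122.424227, 40.730610, -73.935242)  # roughly SF -> NYC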
| 90 |
"""simple docstring"""
from __future__ import annotations
def two_pointer(nums: list[int] , target: int ) -> list[int]:
    '''simple docstring'''
    # assumes `nums` is sorted in ascending order
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 1_1, 1_5], 9) = }''')
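    # Note: the two-pointer scan only works on sorted input. A sketch for unsorted
    # data (the returned indices then refer to the sorted copy, an assumption here):
    print(f'''{two_pointer(sorted([1_1, 2, 1_5, 7]), 9) = }''')  # -> [0, 1]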
| 255 | 0 |
"""simple docstring"""
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/blenderbot_small-90M": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(self , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
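# A minimal usage sketch (assuming the facebook/blenderbot_small-90M checkpoint
# referenced in the maps above is available):
# tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
# tokenizer("sample text").input_ids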
| 356 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = load_dataset('''glue''' , '''mrpc''' )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders # noqa: F811
def training_function(config , args ):
    '''simple docstring'''
    # For testing only
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , None ) == "1":
        config['''num_epochs'''] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps )
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=gradient_accumulation_steps )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            '''Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`''' )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    metric = evaluate.load('''glue''' , '''mrpc''' )
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model ):
                output = model(**batch )
                loss = output.loss
                accelerator.backward(loss )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:' , eval_metric )
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    # New Code #
    parser.add_argument(
        '''--gradient_accumulation_steps''' , type=int , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    args = parser.parse_args()
    config = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
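# A minimal launch sketch (assuming this script is saved as gradient_accumulation.py):
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2
# Inside `accelerator.accumulate(model)`, gradient synchronization and the effect of
# `optimizer.step()` only happen every `gradient_accumulation_steps` batches.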
| 97 | 0 |
"""simple docstring"""
from manim import *
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
A = Rectangle(height=0.5 ,width=0.5 )
A = Rectangle(height=0.25 ,width=0.25 )
A = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
A = [mem.copy() for i in range(6 )]
A = [mem.copy() for i in range(6 )]
A = VGroup(*A_ ).arrange(A_ ,buff=0 )
A = VGroup(*A_ ).arrange(A_ ,buff=0 )
A = VGroup(A_ ,A_ ).arrange(A_ ,buff=0 )
A = Text('CPU' ,font_size=24 )
A = Group(A_ ,A_ ).arrange(A_ ,buff=0.5 ,aligned_edge=A_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A_ )
A = [mem.copy() for i in range(4 )]
A = VGroup(*A_ ).arrange(A_ ,buff=0 )
A = Text('GPU' ,font_size=24 )
A = Group(A_ ,A_ ).arrange(A_ ,buff=0.5 ,aligned_edge=A_ )
gpu.move_to([-1, -1, 0] )
self.add(A_ )
A = [mem.copy() for i in range(6 )]
A = VGroup(*A_ ).arrange(A_ ,buff=0 )
A = Text('Model' ,font_size=24 )
A = Group(A_ ,A_ ).arrange(A_ ,buff=0.5 ,aligned_edge=A_ )
model.move_to([3, -1.0, 0] )
self.add(A_ )
A = []
A = []
A = []
for i, rect in enumerate(A_ ):
rect.set_stroke(A_ )
A = Rectangle(height=0.46 / 4 ,width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(A_ ,opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=A_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] ,direction=A_ ,buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] ,direction=A_ ,buff=0.0 )
self.add(A_ )
model_cpu_arr.append(A_ )
self.add(*A_ ,*A_ ,*A_ )
A = [mem.copy() for i in range(6 )]
A = VGroup(*A_ ).arrange(A_ ,buff=0 )
A = Text('Loaded Checkpoint' ,font_size=24 )
A = Group(A_ ,A_ ).arrange(A_ ,buff=0.5 ,aligned_edge=A_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(A_ )
A = []
A = []
for i, rect in enumerate(A_ ):
A = fill.copy().set_fill(A_ ,opacity=0.7 )
target.move_to(A_ )
ckpt_arr.append(A_ )
A = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(A_ )
self.add(*A_ ,*A_ )
A = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(A_ ,A_ )
A = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' ,font_size=18 ,)
blue_text.next_to(A_ ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(A_ )
A = MarkupText(
F'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
A = [meta_mem.copy() for i in range(6 )]
A = [meta_mem.copy() for i in range(6 )]
A = VGroup(*A_ ).arrange(A_ ,buff=0 )
A = VGroup(*A_ ).arrange(A_ ,buff=0 )
A = VGroup(A_ ,A_ ).arrange(A_ ,buff=0 )
A = Text('Disk' ,font_size=24 )
A = Group(A_ ,A_ ).arrange(A_ ,buff=0.5 ,aligned_edge=A_ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(A_ ,run_time=3 ) ,Write(A_ ,run_time=1 ) ,Create(A_ ,run_time=1 ) )
A = []
for i, rect in enumerate(A_ ):
A = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(A_ ,run_time=1.5 ) )
self.play(*A_ )
self.play(FadeOut(A_ ) )
A = MarkupText(F'Then, the checkpoint is removed from memory\nthrough garbage collection.' ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ ,run_time=3 ) )
self.play(
FadeOut(A_ ,A_ ,*A_ ,*A_ ) ,)
self.wait() | 74 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem ):
    root_marker = ''''''
    protocol: str = (
        None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None # compression type in fsspec. ex: "gzip"
    extension: str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
    def __init__(self , fo = "" , target_protocol = None , target_options = None , **kwargs ):
        '''simple docstring'''
        super().__init__(self , **kwargs )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo , mode='''rb''' , protocol=target_protocol , compression=self.compression , client_kwargs={
                '''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
                '''trust_env''': True, # Enable reading proxy env variables.
                **(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        self.compressed_name = os.path.basename(self.file.path.split('''::''' )[0] )
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex('''.''' )]
            if '''.''' in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
@classmethod
    def _strip_protocol(cls , path ):
        '''simple docstring'''
        return super()._strip_protocol(path ).lstrip('''/''' )

    def _get_dirs(self ):
        '''simple docstring'''
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
            self.dir_cache = {f['''name''']: f}

    def cat(self , path ):
        '''simple docstring'''
        return self.file.open().read()

    def _open(self , path , mode = "rb" , block_size=None , autocommit=True , cache_options=None , **kwargs , ):
        '''simple docstring'''
        path = self._strip_protocol(path )
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem ):
    protocol = '''bz2'''
    compression = '''bz2'''
    extension = '''.bz2'''


class GzipFileSystem(BaseCompressedFileFileSystem ):
    protocol = '''gzip'''
    compression = '''gzip'''
    extension = '''.gz'''


class Lz4FileSystem(BaseCompressedFileFileSystem ):
    protocol = '''lz4'''
    compression = '''lz4'''
    extension = '''.lz4'''


class XzFileSystem(BaseCompressedFileFileSystem ):
    protocol = '''xz'''
    compression = '''xz'''
    extension = '''.xz'''


class ZstdFileSystem(BaseCompressedFileFileSystem ):
    protocol = '''zstd'''
    compression = '''zstd'''
    extension = '''.zst'''
    def __init__(self , fo , mode = "rb" , target_protocol = None , target_options = None , block_size = DEFAULT_BLOCK_SIZE , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            fo=fo , mode=mode , target_protocol=target_protocol , target_options=target_options , block_size=block_size , **kwargs , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self , file_ ):
                '''simple docstring'''
                self._file = file_

            def __enter__(self ):
                '''simple docstring'''
                self._file.__enter__()
                return self

            def __exit__(self , *args , **kwargs ):
                '''simple docstring'''
                self._file.__exit__(*args , **kwargs )

            def __iter__(self ):
                '''simple docstring'''
                return iter(self._file )

            def __next__(self ):
                '''simple docstring'''
                return next(self._file )

            def __getattr__(self , attr ):
                '''simple docstring'''
                return getattr(self._file , attr )

        def fixed_enter(*args , **kwargs ):
            return WrappedFile(_enter(*args , **kwargs ) )

        self.file.__enter__ = fixed_enter
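# A minimal usage sketch (assumes these filesystems are registered with fsspec,
# as the `datasets` library does on import; the URL is hypothetical):
# import fsspec
# with fsspec.open("gzip://file.txt::https://example.com/file.txt.gz") as f:
#     data = f.read()  # bytes, transparently decompressed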
| 279 | 0 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase ):
"""simple docstring"""
    def analyze_directory(self , directory: Path , identifier: Union[str, None] = None , n_identifier: Union[List[str], None] = None , ignore_files: Union[str, List[str], None] = None , only_modules: bool = True , ):
        """simple docstring"""
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier , list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("""__init__.py""" )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("""Testing""" , file )
            if only_modules:
                module_identifier = file.split(""".""" )[0]
                try:
                    module_identifier = getattr(transformers , module_identifier )
                    suite = doctest.DocTestSuite(module_identifier )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(f"""{module_identifier} is not a module.""" )
            else:
                result = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
    def test_modeling_doctests(self ):
        """simple docstring"""
        directory = Path("""src/transformers""" )
        identifier = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files )

    def test_tokenization_doctests(self ):
        """simple docstring"""
        directory = Path("""src/transformers""" )
        identifier = 'tokenization'
        self.analyze_directory(directory , identifier=identifier )

    def test_configuration_doctests(self ):
        """simple docstring"""
        directory = Path("""src/transformers""" )
        identifier = 'configuration'
        self.analyze_directory(directory , identifier=identifier )

    def test_remaining_files(self ):
        """simple docstring"""
        directory = Path("""src/transformers""" )
        n_identifiers = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(directory , n_identifier=n_identifiers )

    def test_doc_sources(self ):
        """simple docstring"""
        directory = Path("""docs/source""" )
        ignore_files = ['favicon.ico']
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False )
| 357 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer ):
    """simple docstring"""

    def __init__(self , replacement: str = "▁" , add_prefix_space: bool = True , unk_token: Union[str, AddedToken] = "<unk>" , eos_token: Union[str, AddedToken] = "</s>" , pad_token: Union[str, AddedToken] = "<pad>" , ):
        """simple docstring"""
        self.special_tokens = {
            """pad""": {"""id""": 0, """token""": pad_token},
            """eos""": {"""id""": 1, """token""": eos_token},
            """unk""": {"""id""": 2, """token""": unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["""id"""]] = token_dict["""token"""]
        tokenizer = Tokenizer(Unigram() )
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(""" {2,}""" ) , """ """ ),
                normalizers.Lowercase(),
            ] )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space ),
                pre_tokenizers.Digits(individual_digits=True ),
                pre_tokenizers.Punctuation(),
            ] )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space )
        tokenizer.post_processor = TemplateProcessing(
            single=f"""$A {self.special_tokens['eos']['token']}""" , special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] , )
        parameters = {
            """model""": """SentencePieceUnigram""",
            """replacement""": replacement,
            """add_prefix_space""": add_prefix_space,
        }
        super().__init__(tokenizer , parameters )

    def train(self , files: Union[str, List[str]] , vocab_size: int = 80_00 , show_progress: bool = True , ):
        """simple docstring"""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        if isinstance(files , str ):
            files = [files]
        self._tokenizer.train(files , trainer=trainer )
        self.add_unk_id()

    def train_from_iterator(self , iterator: Union[Iterator[str], Iterator[Iterator[str]]] , vocab_size: int = 80_00 , show_progress: bool = True , ):
        """simple docstring"""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        self._tokenizer.train_from_iterator(iterator , trainer=trainer )
        self.add_unk_id()

    def add_unk_id(self ):
        """simple docstring"""
        tokenizer_json = json.loads(self._tokenizer.to_str() )
        tokenizer_json["""model"""]["""unk_id"""] = self.special_tokens["""unk"""]["""id"""]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json ) )
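# A minimal usage sketch (hypothetical two-sentence corpus):
# tokenizer = SentencePieceUnigramTokenizer()
# tokenizer.train_from_iterator(["hello world", "goodbye world"], vocab_size=100)
# tokenizer.encode("hello world").ids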
| 69 | 0 |
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class CustomPipeline(DiffusionPipeline ):
    def __init__(self , unet , scheduler ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    def __call__(self ):
        '''simple docstring'''
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        timestep = 1
        model_output = self.unet(image , timestep ).sample
        scheduler_output = self.scheduler.step(model_output , timestep , image ).prev_sample
        # dummy result that keeps the computation but always returns ones
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output )
        return result
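# A minimal usage sketch (assumes `unet` and `scheduler` instances are at hand, e.g.
# from a tiny test checkpoint): `CustomPipeline(unet, scheduler)()` returns an
# all-ones tensor shaped like the scheduler output, which is what the
# custom-pipeline tests check for.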
| 164 |
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , encoder_stride=2 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
        '''simple docstring'''
        return DeiTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model(self , config , pixel_values , labels ):
        '''simple docstring'''
        model = DeiTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_image_modeling(self , config , pixel_values , labels ):
        '''simple docstring'''
        model = DeiTForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    def create_and_check_for_image_classification(self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': DeiTModel,
            '''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = DeiTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=37 )

    def test_config(self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""DeiT does not use inputs_embeds""" )
    def test_inputs_embeds(self ):
        '''simple docstring'''
        pass
    def test_model_common_attributes(self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )

    def test_forward_signature(self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_image_modeling(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )

    def test_for_image_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_training(self ):
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING )
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing(self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_problem_types(self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
            {'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
            {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ):
                    config.problem_type = problem_type['''title''']
                    config.num_labels = problem_type['''num_labels''']
                    model = model_class(config )
                    model.to(torch_device )
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
                    if problem_type["num_labels"] > 1:
                        inputs['''labels'''] = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
                    inputs['''labels'''] = inputs['''labels'''].to(problem_type['''dtype'''] )
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True ) as warning_list:
                        loss = model(**inputs ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
                    loss.backward()
    @slow
    def test_model_from_pretrained(self ):
        '''simple docstring'''
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )


def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor(self ):
        '''simple docstring'''
        return (
            DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self ):
        '''simple docstring'''
        model = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self ):
        '''simple docstring'''
        model = DeiTModel.from_pretrained(
            """facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.float16 , device_map="""auto""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values )
| 164 | 1 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks)."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
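# Example behavior (a sketch): with drop_prob=0.5 and training=True, roughly half
# of the samples in a batch are zeroed while the survivors are scaled by
# 1 / keep_prob = 2, so the expected value of the output matches the input.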
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """
    Construct Patch Embeddings.
    """

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(num_channels) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """
    Group Normalization with 1 group. Input: tensor in shape [B, C, *].
    """

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
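# Note: returning pool(x) - x means the layer's residual connection adds back only
# the pooled difference; this is how PoolFormer replaces attention with a simple,
# parameter-free token mixer.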
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)

        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            # LayerScale: learnable per-channel scales initialized to a small value
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
POOLFORMER_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        # global average pool over the spatial dimensions, then classify
        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
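# A minimal usage sketch (not part of the original file; assumes the
# "sail/poolformer_s12" checkpoint referenced above is available):
#
#     from transformers import AutoImageProcessor, PoolFormerForImageClassification
#
#     image_processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#     model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#     inputs = image_processor(images=image, return_tensors="pt")
#     predicted_class = model(**inputs).logits.argmax(-1)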
| 201 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
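# A hypothetical concrete subclass, for illustration only:
#
#     class JsonDatasetReader(AbstractDatasetReader):
#         def read(self):
#             # build a Dataset (or IterableDataset when streaming) from
#             # self.path_or_paths, honoring self.features and self.cache_dir
#             ...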
| 201 | 1 |
def merge_sort(collection: list) -> list:
    """Sort by repeatedly moving the current minimum to the front half and the
    current maximum to the back half of the result."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
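# Note: despite the name, this is a min/max selection scheme rather than classic
# merge sort; each pass rescans the remaining items, so it runs in O(n^2) time
# instead of O(n log n).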
| 330 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
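# A minimal sketch of the decorator this script builds on (illustrative only):
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # if this raises a CUDA out-of-memory error, the wrapper frees
#              # memory and retries with 64, then 32, and so on
#
#     train()  # called with no arguments; the decorator injects `batch_size`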
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 330 | 1 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"

    @slow
    @require_torch_gpu
    def test_model_download(self):
        """This warms up the cache so that we can time the next test without including download time, which varies between machines."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)
    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)

        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)

        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu

            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check

        assert last_step_stats["val_avg_gen_time"] >= 0.01

        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # else the model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
| 353 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
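# `enable_full_determinism` switches PyTorch to deterministic kernel
# implementations so that repeated runs produce identical outputs for the
# image-slice comparisons below.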
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
torch.manual_seed(0 )
        __UpperCamelCase : Union[str, Any] =UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
__UpperCamelCase : Tuple =DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , )
torch.manual_seed(0 )
__UpperCamelCase : Dict =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
__UpperCamelCase : Union[str, Any] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
__UpperCamelCase : Optional[int] =CLIPTextModel(lowerCamelCase__ )
__UpperCamelCase : int =XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
__UpperCamelCase : Dict =77
__UpperCamelCase : List[str] ={
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
def __lowercase ( self ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def __lowercase ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] ='cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase : List[str] =self.get_dummy_components()
torch.manual_seed(0 )
__UpperCamelCase : List[str] =RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
__UpperCamelCase : List[str] =RobertaSeriesModelWithTransformation(lowerCamelCase__ )
__UpperCamelCase : Any =text_encoder
__UpperCamelCase : int =AltDiffusionPipeline(**lowerCamelCase__ )
__UpperCamelCase : Any =alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Optional[Any] =self.get_dummy_inputs(lowerCamelCase__ )
__UpperCamelCase : Tuple ='A photo of an astronaut'
__UpperCamelCase : str =alt_pipe(**lowerCamelCase__ )
__UpperCamelCase : Any =output.images
__UpperCamelCase : Any =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase : Dict =np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[int] ='cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase : Optional[int] =self.get_dummy_components()
__UpperCamelCase : int =PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
torch.manual_seed(0 )
__UpperCamelCase : Optional[Any] =RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
__UpperCamelCase : Tuple =RobertaSeriesModelWithTransformation(lowerCamelCase__ )
__UpperCamelCase : List[str] =text_encoder
__UpperCamelCase : List[str] =AltDiffusionPipeline(**lowerCamelCase__ )
__UpperCamelCase : Dict =alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Dict =self.get_dummy_inputs(lowerCamelCase__ )
__UpperCamelCase : Optional[Any] =alt_pipe(**lowerCamelCase__ )
__UpperCamelCase : str =output.images
__UpperCamelCase : Optional[int] =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase : Dict =np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Dict =AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , safety_checker=lowerCamelCase__ )
__UpperCamelCase : List[str] =alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Any ='A painting of a squirrel eating a burger'
__UpperCamelCase : Optional[int] =torch.manual_seed(0 )
__UpperCamelCase : Dict =alt_pipe([prompt] , generator=lowerCamelCase__ , guidance_scale=6.0 , num_inference_steps=20 , output_type='np' )
__UpperCamelCase : List[str] =output.images
__UpperCamelCase : int =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__UpperCamelCase : Optional[int] =np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =DDIMScheduler.from_pretrained('BAAI/AltDiffusion' , subfolder='scheduler' )
__UpperCamelCase : Tuple =AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ )
__UpperCamelCase : Any =alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Optional[Any] ='A painting of a squirrel eating a burger'
__UpperCamelCase : Optional[int] =torch.manual_seed(0 )
__UpperCamelCase : Tuple =alt_pipe([prompt] , generator=lowerCamelCase__ , num_inference_steps=2 , output_type='numpy' )
__UpperCamelCase : List[str] =output.images
__UpperCamelCase : Optional[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__UpperCamelCase : List[str] =np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 245 | 0 |
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            # standard conv output size: floor((size + 2*padding - kernel) / stride) + 1
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions" )
    def test_attention_outputs(self):
        pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
    def test_inputs_embeds(self):
        pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
    def test_model_common_attributes(self):
        pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
    def test_keras_fit(self):
        super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8" )
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")
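        # Resetting the global policy to "float32" matters: Keras mixed-precision
        # policies are process-wide, so leaving "mixed_float16" enabled would leak
        # into every test that runs afterwards.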
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 283 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        # np.moveaxis converts the (C, H, W) random arrays to (H, W, C) so PIL can consume them
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.get_tokenizer()
lowerCamelCase : Optional[Any] = self.get_rust_tokenizer()
lowerCamelCase : Tuple = self.get_image_processor()
lowerCamelCase : List[Any] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__A )
lowerCamelCase : Optional[int] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase : Tuple = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __A )
self.assertIsInstance(processor_fast.tokenizer , __A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __A )
self.assertIsInstance(processor_fast.image_processor , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase : int = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCamelCase : List[str] = self.get_image_processor(do_normalize=__A )
lowerCamelCase : Optional[int] = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.get_image_processor()
lowerCamelCase : Optional[int] = self.get_tokenizer()
lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Tuple = self.prepare_image_inputs()
lowerCamelCase : int = image_processor(__A , return_tensors="np" )
lowerCamelCase : Union[str, Any] = processor(images=__A , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.get_image_processor()
lowerCamelCase : Dict = self.get_tokenizer()
lowerCamelCase : Union[str, Any] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Tuple = "lower newer"
lowerCamelCase : Union[str, Any] = processor(text=__A , return_tensors="np" )
lowerCamelCase : List[Any] = tokenizer(__A , return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = self.get_image_processor()
lowerCamelCase : Any = self.get_tokenizer()
lowerCamelCase : int = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Optional[Any] = "lower newer"
lowerCamelCase : Dict = self.prepare_image_inputs()
lowerCamelCase : Any = processor(text=__A , images=__A )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = "google/owlvit-base-patch32"
lowerCamelCase : List[Any] = OwlViTProcessor.from_pretrained(__A )
lowerCamelCase : Tuple = ["cat", "nasa badge"]
lowerCamelCase : str = processor(text=__A )
lowerCamelCase : Union[str, Any] = 16
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = "google/owlvit-base-patch32"
lowerCamelCase : Optional[int] = OwlViTProcessor.from_pretrained(__A )
lowerCamelCase : Dict = [["cat", "nasa badge"], ["person"]]
lowerCamelCase : int = processor(text=__A )
lowerCamelCase : Tuple = 16
lowerCamelCase : Any = len(__A )
lowerCamelCase : Optional[Any] = max([len(__A ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = "google/owlvit-base-patch32"
lowerCamelCase : Tuple = OwlViTProcessor.from_pretrained(__A )
lowerCamelCase : List[Any] = ["cat", "nasa badge"]
lowerCamelCase : Optional[Any] = processor(text=__A )
lowerCamelCase : int = 16
lowerCamelCase : List[str] = inputs["input_ids"]
lowerCamelCase : int = [
[4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = self.get_image_processor()
lowerCamelCase : List[str] = self.get_tokenizer()
lowerCamelCase : str = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Dict = self.prepare_image_inputs()
lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
lowerCamelCase : Any = processor(images=__A , query_images=__A )
self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = self.get_image_processor()
lowerCamelCase : Optional[int] = self.get_tokenizer()
lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase : List[Any] = processor.batch_decode(__A )
lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
| 283 | 1 |
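A minimal usage sketch of the processor API the tests above exercise; it assumes network access to the real google/owlvit-base-patch32 checkpoint and uses a blank PIL image as a stand-in input.

from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.new("RGB", (224, 224))  # blank stand-in for a real photo
inputs = processor(text=[["cat", "nasa badge"]], images=image, return_tensors="np")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']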
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : str = {
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = ["""MobileBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Union[str, Any] = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 351 |
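The block above is the standard transformers lazy-import layout; its core is the optional-dependency guard, sketched standalone below (both imported names exist in transformers.utils).

from transformers.utils import OptionalDependencyNotAvailable, is_torch_available

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    print("torch missing: torch-backed symbols stay out of the import structure")
else:
    print("torch present: torch-backed symbols are registered for lazy import")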
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class UpperCamelCase_ :
lowercase = MBartConfig
lowercase = {}
lowercase = 'gelu'
def __init__( self , A , A=13 , A=7 , A=True , A=False , A=99 , A=32 , A=2 , A=4 , A=37 , A=0.1 , A=0.1 , A=20 , A=2 , A=1 , A=0 , ) -> Optional[int]:
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : Dict = batch_size
UpperCAmelCase : Tuple = seq_length
UpperCAmelCase : str = is_training
UpperCAmelCase : Optional[int] = use_labels
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : Union[str, Any] = num_hidden_layers
UpperCAmelCase : List[Any] = num_attention_heads
UpperCAmelCase : Optional[int] = intermediate_size
UpperCAmelCase : Dict = hidden_dropout_prob
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : Optional[int] = max_position_embeddings
UpperCAmelCase : Optional[Any] = eos_token_id
UpperCAmelCase : List[str] = pad_token_id
UpperCAmelCase : List[Any] = bos_token_id
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase : List[str] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase : Union[str, Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : str = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase : List[Any] = prepare_mbart_inputs_dict(A , A , A )
return config, inputs_dict
def _lowercase( self , A , A ) -> List[str]:
UpperCAmelCase : List[str] = TFMBartModel(config=A ).get_decoder()
UpperCAmelCase : int = inputs_dict["""input_ids"""]
UpperCAmelCase : str = input_ids[:1, :]
UpperCAmelCase : Optional[Any] = inputs_dict["""attention_mask"""][:1, :]
UpperCAmelCase : List[str] = inputs_dict["""head_mask"""]
UpperCAmelCase : List[Any] = 1
# first forward pass
UpperCAmelCase : List[str] = model(A , attention_mask=A , head_mask=A , use_cache=A )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = outputs.to_tuple()
UpperCAmelCase : int = past_key_values[1]
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> List[str]:
if attention_mask is None:
UpperCAmelCase : Tuple = tf.cast(tf.math.not_equal(_lowercase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase : int = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase : List[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
lowercase = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
lowercase = (
{
'conversational': TFMBartForConditionalGeneration,
'feature-extraction': TFMBartModel,
'summarization': TFMBartForConditionalGeneration,
'text2text-generation': TFMBartForConditionalGeneration,
'translation': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowercase = True
lowercase = False
lowercase = False
def _lowercase( self , A , A , A , A , A ) -> int:
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : int = TFMBartModelTester(self )
UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=A )
def _lowercase( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> Dict:
UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A )
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCamelCase_ ( unittest.TestCase ):
lowercase = [
' UN Chief Says There Is No Military Solution in Syria',
]
lowercase = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
]
lowercase = 'facebook/mbart-large-en-ro'
@cached_property
def _lowercase( self ) -> Any:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def _lowercase( self , **A ) -> Any:
UpperCAmelCase : Optional[int] = self.translate_src_text(**A )
self.assertListEqual(self.expected_text , A )
def _lowercase( self , **A ) -> Optional[Any]:
UpperCAmelCase : List[str] = self.tokenizer(self.src_text , **A , return_tensors="""tf""" )
UpperCAmelCase : int = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
UpperCAmelCase : Any = self.tokenizer.batch_decode(A , skip_special_tokens=A )
return generated_words
@slow
def _lowercase( self ) -> List[Any]:
self._assert_generated_batch_equal_expected()
| 338 | 0 |
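For reference, a runnable sketch of the translation path the integration test above covers (the class is spelled TFAutoModelForSeq2SeqLM in the real library; requires TensorFlow and access to the facebook/mbart-large-en-ro weights).

from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-en-ro")
batch = tokenizer([" UN Chief Says There Is No Military Solution in Syria"], return_tensors="tf")
generated = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))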
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
for attribute in key.split(""".""" ):
__SCREAMING_SNAKE_CASE = getattr(UpperCamelCase_ , UpperCamelCase_ )
if weight_type is not None:
__SCREAMING_SNAKE_CASE = getattr(UpperCamelCase_ , UpperCamelCase_ ).shape
else:
__SCREAMING_SNAKE_CASE = hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
__SCREAMING_SNAKE_CASE = value
elif weight_type == "weight_g":
__SCREAMING_SNAKE_CASE = value
elif weight_type == "weight_v":
__SCREAMING_SNAKE_CASE = value
elif weight_type == "bias":
__SCREAMING_SNAKE_CASE = value
else:
__SCREAMING_SNAKE_CASE = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = fairseq_model.state_dict()
__SCREAMING_SNAKE_CASE = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__SCREAMING_SNAKE_CASE = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , hf_model.config.feat_extract_norm == """group""" , )
__SCREAMING_SNAKE_CASE = True
else:
for key, mapped_key in MAPPING.items():
__SCREAMING_SNAKE_CASE = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__SCREAMING_SNAKE_CASE = True
if "*" in mapped_key:
__SCREAMING_SNAKE_CASE = name.split(UpperCamelCase_ )[0].split(""".""" )[-2]
__SCREAMING_SNAKE_CASE = mapped_key.replace("""*""" , UpperCamelCase_ )
if "weight_g" in name:
__SCREAMING_SNAKE_CASE = """weight_g"""
elif "weight_v" in name:
__SCREAMING_SNAKE_CASE = """weight_v"""
elif "weight" in name:
__SCREAMING_SNAKE_CASE = """weight"""
elif "bias" in name:
__SCREAMING_SNAKE_CASE = """bias"""
else:
__SCREAMING_SNAKE_CASE = None
set_recursively(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
continue
if not is_used:
unused_weights.append(UpperCamelCase_ )
logger.warning(f"Unused weights: {unused_weights}" )
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = full_name.split("""conv_layers.""" )[-1]
__SCREAMING_SNAKE_CASE = name.split(""".""" )
__SCREAMING_SNAKE_CASE = int(items[0] )
__SCREAMING_SNAKE_CASE = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
__SCREAMING_SNAKE_CASE = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
__SCREAMING_SNAKE_CASE = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
__SCREAMING_SNAKE_CASE = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
__SCREAMING_SNAKE_CASE = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(UpperCamelCase_ )
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = SEWConfig()
if is_finetuned:
__SCREAMING_SNAKE_CASE = model.wav_encoder.wav_model.cfg
else:
__SCREAMING_SNAKE_CASE = model.cfg
__SCREAMING_SNAKE_CASE = fs_config.conv_bias
__SCREAMING_SNAKE_CASE = eval(fs_config.conv_feature_layers )
__SCREAMING_SNAKE_CASE = [x[0] for x in conv_layers]
__SCREAMING_SNAKE_CASE = [x[1] for x in conv_layers]
__SCREAMING_SNAKE_CASE = [x[2] for x in conv_layers]
__SCREAMING_SNAKE_CASE = """gelu"""
__SCREAMING_SNAKE_CASE = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
__SCREAMING_SNAKE_CASE = 0.0
__SCREAMING_SNAKE_CASE = fs_config.activation_fn.name
__SCREAMING_SNAKE_CASE = fs_config.encoder_embed_dim
__SCREAMING_SNAKE_CASE = 0.02
__SCREAMING_SNAKE_CASE = fs_config.encoder_ffn_embed_dim
__SCREAMING_SNAKE_CASE = 1e-5
__SCREAMING_SNAKE_CASE = fs_config.encoder_layerdrop
__SCREAMING_SNAKE_CASE = fs_config.encoder_attention_heads
__SCREAMING_SNAKE_CASE = fs_config.conv_pos_groups
__SCREAMING_SNAKE_CASE = fs_config.conv_pos
__SCREAMING_SNAKE_CASE = len(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = fs_config.encoder_layers
__SCREAMING_SNAKE_CASE = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
__SCREAMING_SNAKE_CASE = model.cfg
__SCREAMING_SNAKE_CASE = fs_config.final_dropout
__SCREAMING_SNAKE_CASE = fs_config.layerdrop
__SCREAMING_SNAKE_CASE = fs_config.activation_dropout
__SCREAMING_SNAKE_CASE = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
__SCREAMING_SNAKE_CASE = fs_config.attention_dropout
__SCREAMING_SNAKE_CASE = fs_config.dropout_input
__SCREAMING_SNAKE_CASE = fs_config.dropout
__SCREAMING_SNAKE_CASE = fs_config.mask_channel_length
__SCREAMING_SNAKE_CASE = fs_config.mask_channel_prob
__SCREAMING_SNAKE_CASE = fs_config.mask_length
__SCREAMING_SNAKE_CASE = fs_config.mask_prob
__SCREAMING_SNAKE_CASE = """Wav2Vec2FeatureExtractor"""
__SCREAMING_SNAKE_CASE = """Wav2Vec2CTCTokenizer"""
return config
@torch.no_grad()
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=True ):
if is_finetuned:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
__SCREAMING_SNAKE_CASE = SEWConfig.from_pretrained(UpperCamelCase_ )
else:
__SCREAMING_SNAKE_CASE = convert_config(model[0] , UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = model[0].eval()
__SCREAMING_SNAKE_CASE = True if config.feat_extract_norm == """layer""" else False
__SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , )
if is_finetuned:
if dict_path:
__SCREAMING_SNAKE_CASE = Dictionary.load(UpperCamelCase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__SCREAMING_SNAKE_CASE = target_dict.pad_index
__SCREAMING_SNAKE_CASE = target_dict.bos_index
__SCREAMING_SNAKE_CASE = target_dict.pad_index
__SCREAMING_SNAKE_CASE = target_dict.bos_index
__SCREAMING_SNAKE_CASE = target_dict.eos_index
__SCREAMING_SNAKE_CASE = len(target_dict.symbols )
__SCREAMING_SNAKE_CASE = os.path.join(UpperCamelCase_ , """vocab.json""" )
if not os.path.isdir(UpperCamelCase_ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(UpperCamelCase_ ) )
return
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
with open(UpperCamelCase_ , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = WavaVecaCTCTokenizer(
UpperCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=UpperCamelCase_ , )
__SCREAMING_SNAKE_CASE = WavaVecaProcessor(feature_extractor=UpperCamelCase_ , tokenizer=UpperCamelCase_ )
processor.save_pretrained(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = SEWForCTC(UpperCamelCase_ )
else:
__SCREAMING_SNAKE_CASE = SEWModel(UpperCamelCase_ )
feature_extractor.save_pretrained(UpperCamelCase_ )
recursively_load_weights(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
hf_model.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
__magic_name__ = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 100 |
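The weight loader above repeatedly walks dotted state-dict keys with getattr; a clean, self-contained restatement of that helper:

from types import SimpleNamespace

def get_nested_attr(obj, dotted_key):
    # follow "a.b.c" through nested attributes, e.g. model.encoder.layer_norm
    for attribute in dotted_key.split("."):
        obj = getattr(obj, attribute)
    return obj

model = SimpleNamespace(encoder=SimpleNamespace(layer_norm="LN"))
assert get_nested_attr(model, "encoder.layer_norm") == "LN"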
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
__A : str = '''pytorch_model.bin'''
__A : Optional[int] = '''pytorch_model.bin.index.json'''
__A : Any = '''adapter_config.json'''
__A : int = '''adapter_model.bin'''
__A : Union[str, Any] = '''adapter_model.safetensors'''
__A : int = '''tf_model.h5'''
__A : Dict = '''tf_model.h5.index.json'''
__A : Dict = '''model.ckpt'''
__A : Optional[int] = '''flax_model.msgpack'''
__A : Tuple = '''flax_model.msgpack.index.json'''
__A : Any = '''model.safetensors'''
__A : Dict = '''model.safetensors.index.json'''
__A : Dict = '''config.json'''
__A : int = '''preprocessor_config.json'''
__A : Optional[Any] = FEATURE_EXTRACTOR_NAME
__A : Any = '''generation_config.json'''
__A : str = '''modelcard.json'''
__A : str = '''▁'''
__A : Union[str, Any] = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
__A : List[str] = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
__A : List[Any] = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
__A : Tuple = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
if version.parse(_UpperCAmelCase ) < version.parse(_UpperCAmelCase ):
if "dev" in min_version:
lowerCAmelCase : Tuple = (
'This example requires a source install from HuggingFace Transformers (see '
'`https://huggingface.co/docs/transformers/installation#install-from-source`),'
)
else:
lowerCAmelCase : Any = f"This example requires a minimum version of {min_version},"
error_message += f" but the version found is {__version__}.\n"
raise ImportError(
error_message
+ 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '
'versions of HuggingFace Transformers.' )
| 138 | 0 |
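A minimal sketch of the minimum-version gate the block above ends with, using packaging.version as the original does:

from packaging import version

def check_min_version(found: str, minimum: str) -> None:
    if version.parse(found) < version.parse(minimum):
        raise ImportError(f"This example requires a minimum version of {minimum}, but the version found is {found}.")

check_min_version("4.30.0", "4.20.0")  # passes silently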
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
A__ = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
"""simple docstring"""
inspect_dataset(__lowerCAmelCase , __lowerCAmelCase )
snake_case__ : str = path + '''.py'''
assert script_name in os.listdir(__lowerCAmelCase )
assert "__pycache__" not in os.listdir(__lowerCAmelCase )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ) -> str:
"""simple docstring"""
inspect_metric(__lowerCAmelCase , __lowerCAmelCase )
snake_case__ : str = path + '''.py'''
assert script_name in os.listdir(__lowerCAmelCase )
assert "__pycache__" not in os.listdir(__lowerCAmelCase )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
"""simple docstring"""
snake_case__ : List[str] = get_dataset_config_info(__lowerCAmelCase , config_name=__lowerCAmelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
"""simple docstring"""
with pytest.raises(__lowerCAmelCase ):
get_dataset_config_info(__lowerCAmelCase , config_name=__lowerCAmelCase )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
"""simple docstring"""
snake_case__ : List[str] = get_dataset_config_names(__lowerCAmelCase )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
"""simple docstring"""
snake_case__ : str = get_dataset_infos(__lowerCAmelCase )
assert list(infos.keys() ) == expected_configs
snake_case__ : int = expected_configs[0]
assert expected_config in infos
snake_case__ : Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
"""simple docstring"""
snake_case__ : Union[str, Any] = get_dataset_infos(__lowerCAmelCase )
assert expected_config in infos
snake_case__ : List[str] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
"""simple docstring"""
with pytest.raises(__lowerCAmelCase ):
get_dataset_split_names(__lowerCAmelCase , config_name=__lowerCAmelCase )
| 44 |
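The same inspection helpers can be called directly; a short usage sketch (requires network access to the Hugging Face Hub):

from datasets import get_dataset_config_names, get_dataset_split_names

print(get_dataset_config_names("squad"))               # e.g. ['plain_text']
print(get_dataset_split_names("squad", "plain_text"))  # e.g. ['train', 'validation']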
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class a ( __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : Dict = TransfoXLTokenizer
__lowerCAmelCase : Union[str, Any] = False
__lowerCAmelCase : List[str] = False
def __lowerCamelCase ( self :Union[str, Any] ):
super().setUp()
snake_case__ : Optional[int] = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
snake_case__ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowerCamelCase ( self :int ,**__lowercase :Any ):
snake_case__ : str = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname ,**__lowercase )
def __lowerCamelCase ( self :int ,__lowercase :Optional[int] ):
snake_case__ : int = '''<unk> UNwanted , running'''
snake_case__ : List[Any] = '''<unk> unwanted, running'''
return input_text, output_text
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : Optional[Any] = TransfoXLTokenizer(vocab_file=self.vocab_file ,lower_case=__lowercase )
snake_case__ : Tuple = tokenizer.tokenize('''<unk> UNwanted , running''' )
self.assertListEqual(__lowercase ,['''<unk>''', '''unwanted''', ''',''', '''running'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) ,[0, 4, 8, 7] )
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : List[Any] = TransfoXLTokenizer(lower_case=__lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) ,['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def __lowerCamelCase ( self :Tuple ):
snake_case__ : Optional[Any] = TransfoXLTokenizer(lower_case=__lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) ,['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Any = TransfoXLTokenizer(lower_case=__lowercase )
snake_case__ : List[str] = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
snake_case__ : Union[str, Any] = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
self.assertListEqual(tokenizer.tokenize(__lowercase ) ,__lowercase )
self.assertEqual(tokenizer.convert_tokens_to_string(__lowercase ) ,__lowercase )
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : Any = self.get_tokenizer()
snake_case__ : Optional[Any] = len(__lowercase )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' ,1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(__lowercase ) ,original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) ,[1] )
self.assertEqual(tokenizer.decode([1] ) ,'''new1''' )
| 44 | 1 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class _lowercase ( _lowerCamelCase ):
"""simple docstring"""
__A = (DPMSolverSDEScheduler,)
__A = 10
def UpperCamelCase_ (self , **lowerCamelCase_ ):
"""simple docstring"""
a = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**_A )
return config
def UpperCamelCase_ (self ):
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def UpperCamelCase_ (self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def UpperCamelCase_ (self ):
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_A )
def UpperCamelCase_ (self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps )
a = self.dummy_model()
a = self.dummy_sample_deter * scheduler.init_noise_sigma
a = sample.to(_A )
for i, t in enumerate(scheduler.timesteps ):
a = scheduler.scale_model_input(_A , _A )
a = model(_A , _A )
a = scheduler.step(_A , _A , _A )
a = output.prev_sample
a = torch.sum(torch.abs(_A ) )
a = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1E-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.scheduler_classes[0]
a = self.get_scheduler_config(prediction_type="v_prediction" )
a = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps )
a = self.dummy_model()
a = self.dummy_sample_deter * scheduler.init_noise_sigma
a = sample.to(_A )
for i, t in enumerate(scheduler.timesteps ):
a = scheduler.scale_model_input(_A , _A )
a = model(_A , _A )
a = scheduler.step(_A , _A , _A )
a = output.prev_sample
a = torch.sum(torch.abs(_A ) )
a = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1E-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1E-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1E-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1E-3
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps , device=_A )
a = self.dummy_model()
a = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
a = scheduler.scale_model_input(_A , _A )
a = model(_A , _A )
a = scheduler.step(_A , _A , _A )
a = output.prev_sample
a = torch.sum(torch.abs(_A ) )
a = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1E-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.scheduler_classes[0]
a = self.get_scheduler_config()
a = scheduler_class(**_A , use_karras_sigmas=_A )
scheduler.set_timesteps(self.num_inference_steps , device=_A )
a = self.dummy_model()
a = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma
a = sample.to(_A )
for t in scheduler.timesteps:
a = scheduler.scale_model_input(_A , _A )
a = model(_A , _A )
a = scheduler.step(_A , _A , _A )
a = output.prev_sample
a = torch.sum(torch.abs(_A ) )
a = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
| 227 |
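The scheduler tests above all repeat the same sampling skeleton; a minimal standalone version (assumes diffusers with torchsde installed; the model here is a zero-valued stand-in for a trained UNet):

import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma

def model(x, t):  # placeholder noise predictor
    return torch.zeros_like(x)

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample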
def snake_case( __magic_name__ , __magic_name__ ) -> float:
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(1_00, 0.2_5) = }''')
print(f'''{price_plus_tax(1_2_5.5_0, 0.0_5) = }''')
| 308 | 0 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=2 , _lowerCamelCase=56 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=2 , _lowerCamelCase=2 , _lowerCamelCase=7 , _lowerCamelCase="gelu_new" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=4 , _lowerCamelCase="block_sparse" , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=2 , _lowerCamelCase=3 , ) ->List[str]:
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : List[str] = batch_size
SCREAMING_SNAKE_CASE : int = seq_length
SCREAMING_SNAKE_CASE : Tuple = is_training
SCREAMING_SNAKE_CASE : int = use_attention_mask
SCREAMING_SNAKE_CASE : Dict = use_token_type_ids
SCREAMING_SNAKE_CASE : Any = use_labels
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE : Dict = num_attention_heads
SCREAMING_SNAKE_CASE : str = intermediate_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = type_vocab_size
SCREAMING_SNAKE_CASE : Tuple = type_sequence_label_size
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : int = num_choices
SCREAMING_SNAKE_CASE : Dict = rescale_embeddings
SCREAMING_SNAKE_CASE : Dict = attention_type
SCREAMING_SNAKE_CASE : Tuple = use_bias
SCREAMING_SNAKE_CASE : int = block_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_random_blocks
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : int = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : int = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE : Any = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_flax
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
__SCREAMING_SNAKE_CASE : str = False
__SCREAMING_SNAKE_CASE : str = False
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : Dict = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __lowerCAmelCase ( self ) ->str:
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __lowerCAmelCase ( self ) ->List[str]:
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __lowerCAmelCase ( self ) ->List[str]:
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __lowerCAmelCase ( self ) ->Union[str, Any]:
super().test_hidden_states_output()
@slow
def __lowerCAmelCase ( self ) ->List[Any]:
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class_name.from_pretrained('''google/bigbird-roberta-base''' )
self.assertIsNotNone(_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = model_class(_lowerCamelCase )
@jax.jit
def model_jitted(_lowerCamelCase , _lowerCamelCase=None , **_lowerCamelCase ):
return model(input_ids=_lowerCamelCase , attention_mask=_lowerCamelCase , **_lowerCamelCase )
with self.subTest('''JIT Enabled''' ):
SCREAMING_SNAKE_CASE : str = model_jitted(**_lowerCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE : str = model_jitted(**_lowerCamelCase ).to_tuple()
self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
for jitted_output, output in zip(_lowerCamelCase , _lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=1e-5 , _lowerCamelCase="outputs" , _lowerCamelCase=None ) ->Any:
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
# an effort was done to return `attention_probs` (yet to be verified).
if name.startswith('''outputs.attentions''' ):
return
else:
super().check_pt_flax_outputs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
| 19 |
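The JIT test above compares compiled and eager outputs; the same pattern in isolation (requires jax):

import jax
import jax.numpy as jnp

@jax.jit
def double(x):
    return x * 2

x = jnp.arange(4)
jitted = double(x)
with jax.disable_jit():  # force eager execution for comparison/debugging
    eager = double(x)
assert (jitted == eager).all()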
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ : Tuple = '''▁'''
a__ : List[Any] = {'''vocab_file''': '''spiece.model'''}
a__ : Optional[Any] = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}
}
a__ : str = {
'''google/pegasus-xsum''': 512,
}
a__ : str = logging.get_logger(__name__)
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : str = ['input_ids', 'attention_mask']
def __init__( self , _lowerCamelCase , _lowerCamelCase="<pad>" , _lowerCamelCase="</s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<mask_2>" , _lowerCamelCase="<mask_1>" , _lowerCamelCase=None , _lowerCamelCase=103 , _lowerCamelCase = None , **_lowerCamelCase , ) ->None:
SCREAMING_SNAKE_CASE : Dict = offset
if additional_special_tokens is not None:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError(
F"""additional_special_tokens should be of type {type(_lowerCamelCase )}, but is"""
F""" {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : List[Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"""<unk_{i}>""" for i in range(len(_lowerCamelCase ) , self.offset - 1 )
]
if len(set(_lowerCamelCase ) ) != len(_lowerCamelCase ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
F""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
SCREAMING_SNAKE_CASE : Dict = additional_special_tokens_extended
else:
SCREAMING_SNAKE_CASE : str = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"""<unk_{i}>""" for i in range(2 , self.offset )]
SCREAMING_SNAKE_CASE : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , mask_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token_sent=_lowerCamelCase , offset=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
SCREAMING_SNAKE_CASE : List[str] = mask_token_sent
SCREAMING_SNAKE_CASE : Optional[int] = vocab_file
SCREAMING_SNAKE_CASE : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCamelCase )
# add special tokens to encoder dict
SCREAMING_SNAKE_CASE : Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
SCREAMING_SNAKE_CASE : Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
def __lowerCAmelCase ( self ) ->int:
return len(self.sp_model ) + self.offset
def __lowerCAmelCase ( self ) ->Dict[str, int]:
SCREAMING_SNAKE_CASE : Union[str, Any] = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = self.__dict__.copy()
SCREAMING_SNAKE_CASE : str = None
return state
def __setstate__( self , _lowerCamelCase ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
SCREAMING_SNAKE_CASE : List[str] = {}
SCREAMING_SNAKE_CASE : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[str]:
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
SCREAMING_SNAKE_CASE : List[str] = self.sp_model.piece_to_id(_lowerCamelCase )
return sp_id + self.offset
def __lowerCAmelCase ( self , _lowerCamelCase ) ->str:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
SCREAMING_SNAKE_CASE : Dict = self.sp_model.IdToPiece(index - self.offset )
return token
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : int = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_lowerCamelCase ) + token
SCREAMING_SNAKE_CASE : Optional[Any] = []
else:
current_sub_tokens.append(_lowerCamelCase )
out_string += self.sp_model.decode(_lowerCamelCase )
return out_string.strip()
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->str:
return 1
def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
SCREAMING_SNAKE_CASE : Dict = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ) ->List[int]:
if already_has_special_tokens:
return self._special_token_mask(_lowerCamelCase )
elif token_ids_a is None:
return self._special_token_mask(_lowerCamelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=None ) ->List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->Tuple[str]:
if not os.path.isdir(_lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : int = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , '''wb''' ) as fi:
SCREAMING_SNAKE_CASE : Tuple = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
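The __getstate__/__setstate__ pair above is the standard trick for making SentencePiece-backed tokenizers picklable: the C++ processor handle is dropped before pickling and rebuilt from the vocab file on unpickling. A minimal standalone sketch of the same pattern (class and attribute names here are illustrative, not taken from the sample above):
import sentencepiece as spm
class SpmWrapper:
    def __init__(self, vocab_file, **sp_model_kwargs):
        self.vocab_file = vocab_file
        self.sp_model_kwargs = sp_model_kwargs
        self.sp_model = spm.SentencePieceProcessor(**sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None  # the C++ processor handle is not picklable
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)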
| 19 | 1 |
'''simple docstring'''
import functools
from typing import Any
def UpperCAmelCase_ ( __lowercase : str , __lowercase : list[str] ) -> bool:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ) or len(__lowercase ) == 0:
raise ValueError("the string should be not empty string" )
if not isinstance(__lowercase , __lowercase ) or not all(
isinstance(__lowercase , __lowercase ) and len(__lowercase ) > 0 for item in words ):
raise ValueError("the words should be a list of non-empty strings" )
# Build trie
_UpperCAmelCase = {}
_UpperCAmelCase = "WORD_KEEPER"
for word in words:
_UpperCAmelCase = trie
for c in word:
if c not in trie_node:
_UpperCAmelCase = {}
_UpperCAmelCase = trie_node[c]
_UpperCAmelCase = True
_UpperCAmelCase = len(__lowercase )
# Dynamic programming method
@functools.cache
def is_breakable(__lowercase : int ) -> bool:
if index == len_string:
return True
_UpperCAmelCase = trie
for i in range(__lowercase , __lowercase ):
_UpperCAmelCase = trie_node.get(string[i] , __lowercase )
if trie_node is None:
return False
if trie_node.get(__lowercase , __lowercase ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
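For reference, a sketch of the same trie-plus-memoization word-break check with descriptive names chosen for clarity (they are not the names used above):
import functools
def word_break(string: str, words: list[str]) -> bool:
    # Build a character trie over the dictionary; a sentinel key marks word ends.
    trie: dict = {}
    for word in words:
        node = trie
        for ch in word:
            node = node.setdefault(ch, {})
        node["WORD_KEEPER"] = True
    @functools.cache
    def is_breakable(index: int) -> bool:
        # True if string[index:] can be segmented into dictionary words.
        if index == len(string):
            return True
        node = trie
        for i in range(index, len(string)):
            node = node.get(string[i])
            if node is None:
                return False
            if node.get("WORD_KEEPER") and is_breakable(i + 1):
                return True
        return False
    return is_breakable(0)
assert word_break("applepenapple", ["apple", "pen"]) is True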
| 22 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase_ ( __lowercase : int , __lowercase : Dict , __lowercase : str , __lowercase : Optional[Any] , __lowercase : str ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = TapasConfig.from_json_file(__lowercase )
# set absolute/relative position embeddings parameter
_UpperCAmelCase = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
_UpperCAmelCase = TapasForQuestionAnswering(config=__lowercase )
elif task == "WTQ":
# run_task_main.py hparams
_UpperCAmelCase = 4
_UpperCAmelCase = True
# hparam_utils.py hparams
_UpperCAmelCase = 0.66_4694
_UpperCAmelCase = 0.20_7951
_UpperCAmelCase = 0.12_1194
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = 0.035_2513
_UpperCAmelCase = TapasForQuestionAnswering(config=__lowercase )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
_UpperCAmelCase = 4
_UpperCAmelCase = False
# hparam_utils.py hparams
_UpperCAmelCase = 36.4519
_UpperCAmelCase = 0.90_3421
_UpperCAmelCase = 222.088
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = 0.76_3141
_UpperCAmelCase = TapasForQuestionAnswering(config=__lowercase )
elif task == "TABFACT":
_UpperCAmelCase = TapasForSequenceClassification(config=__lowercase )
elif task == "MLM":
_UpperCAmelCase = TapasForMaskedLM(config=__lowercase )
elif task == "INTERMEDIATE_PRETRAINING":
_UpperCAmelCase = TapasModel(config=__lowercase )
else:
raise ValueError(f'Task {task} not supported.' )
print(f'Building PyTorch model from configuration: {config}' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(__lowercase , __lowercase , __lowercase )
# Save pytorch-model (weights and configuration)
print(f'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(__lowercase )
# Save tokenizer files
print(f'Save tokenizer files to {pytorch_dump_path}' )
_UpperCAmelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 )
tokenizer.save_pretrained(__lowercase )
print("Used relative position embeddings:" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to False.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__SCREAMING_SNAKE_CASE :List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
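For orientation, a hypothetical invocation of this conversion script (the script filename and every path below are placeholders):
python convert_tapas_original_tf_checkpoint_to_pytorch.py \
    --task WTQ \
    --reset_position_index_per_cell \
    --tf_checkpoint_path /path/to/model.ckpt \
    --tapas_config_file /path/to/tapas_config.json \
    --pytorch_dump_path /path/to/pytorch_dump
Note that the tokenizer line above derives the vocab path by stripping the trailing "model.ckpt" (10 characters) from --tf_checkpoint_path, so the checkpoint file is expected to carry that name.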
| 22 | 1 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
UpperCamelCase = 100
UpperCamelCase = set(range(3, NUM_PRIMES, 2))
primes.add(2)
UpperCamelCase = 42
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_0_0 )
def SCREAMING_SNAKE_CASE( __lowercase ) -> set[int]:
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
A: set[int] = set()
A: int
A: int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def SCREAMING_SNAKE_CASE( __lowercase = 5_0_0_0 ) -> int | None:
for number_to_partition in range(1 , __lowercase ):
if len(partition(__lowercase ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(f'{solution() = }')
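Because prime factorizations are unique, every multiset of primes maps to a distinct product, so the size of the returned set counts prime partitions exactly. A quick sanity check of that encoding (assuming the recursive function above is named partition, as its own recursive call suggests):
# 10 = 2+2+2+2+2 = 2+2+3+3 = 2+3+5 = 3+7 = 5+5, i.e. five prime partitions
assert len(partition(10)) == 5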
| 334 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
pass
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> None:
'''simple docstring'''
A: Any = data
A: Node | None = None
def __iter__( self : Optional[int] ) -> List[str]:
'''simple docstring'''
A: List[str] = self
A: Dict = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(SCREAMING_SNAKE_CASE_ )
yield node.data
A: str = node.next_node
@property
def _snake_case ( self : List[str] ) -> bool:
'''simple docstring'''
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
UpperCamelCase = Node(1)
UpperCamelCase = Node(2)
UpperCamelCase = Node(3)
UpperCamelCase = Node(4)
print(root_node.has_loop) # False
UpperCamelCase = root_node.next_node
print(root_node.has_loop) # True
UpperCamelCase = Node(5)
UpperCamelCase = Node(6)
UpperCamelCase = Node(5)
UpperCamelCase = Node(6)
print(root_node.has_loop) # False
UpperCamelCase = Node(1)
print(root_node.has_loop) # False
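The has_loop property above detects a cycle by keeping a list of visited nodes, which costs O(n) extra memory plus O(n^2) time for the list-membership checks. A sketch of the constant-memory alternative, Floyd's tortoise-and-hare (function name illustrative; it assumes the same next_node attribute):
def has_loop_floyd(head) -> bool:
    # A slow and a fast pointer can only meet again if the list cycles.
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False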
| 334 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A : Optional[int] = 1_6
A : Tuple = 3_2
def __lowerCamelCase ( __a :Accelerator , __a :int = 1_6 ) -> Tuple:
"""simple docstring"""
A__ = AutoTokenizer.from_pretrained("""bert-base-cased""" )
A__ = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__a :Any ):
# max_length=None => use the model max length (it's actually the default)
A__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__a , max_length=__a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A__ = datasets.map(
__a , batched=__a , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A__ = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__a :str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A__ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A__ = 1_6
elif accelerator.mixed_precision != "no":
A__ = 8
else:
A__ = None
return tokenizer.pad(
__a , padding="""longest""" , max_length=__a , pad_to_multiple_of=__a , return_tensors="""pt""" , )
# Instantiate dataloaders.
A__ = DataLoader(
tokenized_datasets["""train"""] , shuffle=__a , collate_fn=__a , batch_size=__a )
A__ = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__a , collate_fn=__a , batch_size=__a )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A : List[str] = mocked_dataloaders # noqa: F811
def __lowerCamelCase ( __a :Any , __a :Optional[Any] ) -> List[str]:
"""simple docstring"""
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __a ) == "1":
A__ = 2
# New Code #
A__ = int(args.gradient_accumulation_steps )
# Initialize accelerator
A__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__a )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"""Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ = config["""lr"""]
A__ = int(config["""num_epochs"""] )
A__ = int(config["""seed"""] )
A__ = int(config["""batch_size"""] )
A__ = evaluate.load("""glue""" , """mrpc""" )
set_seed(__a )
A__ , A__ = get_dataloaders(__a , __a )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__a )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ = model.to(accelerator.device )
# Instantiate optimizer
A__ = AdamW(params=model.parameters() , lr=__a )
# Instantiate scheduler
A__ = get_linear_schedule_with_warmup(
optimizer=__a , num_warmup_steps=1_0_0 , num_training_steps=(len(__a ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ , A__ , A__ , A__ , A__ = accelerator.prepare(
__a , __a , __a , __a , __a )
# Now we train the model
for epoch in range(__a ):
model.train()
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also do not currently support TPUs, nor do we advise using them, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__a ):
A__ = model(**__a )
A__ = output.loss
accelerator.backward(__a )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ = model(**__a )
A__ = outputs.logits.argmax(dim=-1 )
A__ , A__ = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=__a , references=__a , )
A__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , __a )
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=__a , default=__a , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__a , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
A__ = parser.parse_args()
A__ = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(__a , __a )
if __name__ == "__main__":
main()
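A hypothetical launch command for this script (the filename is a placeholder; the flags are the ones defined above):
accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4 --mixed_precision fp16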
| 274 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
A : str = logging.get_logger(__name__)
A : Union[str, Any] = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class A (SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = '''layoutlmv3'''
def __init__( self : Tuple , __lowerCAmelCase : Optional[int]=5_02_65 , __lowerCAmelCase : Tuple=7_68 , __lowerCAmelCase : Union[str, Any]=12 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : List[Any]=30_72 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict=5_12 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Dict=0.0_2 , __lowerCAmelCase : List[str]=1e-5 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Tuple=10_24 , __lowerCAmelCase : List[str]=1_28 , __lowerCAmelCase : Optional[int]=1_28 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Any=1_28 , __lowerCAmelCase : str=64 , __lowerCAmelCase : Optional[int]=2_56 , __lowerCAmelCase : int=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : int=2_24 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Dict=None , **__lowerCAmelCase : Optional[Any] , ) -> Dict:
"""simple docstring"""
super().__init__(
vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
A__ = max_ad_position_embeddings
A__ = coordinate_size
A__ = shape_size
A__ = has_relative_attention_bias
A__ = rel_pos_bins
A__ = max_rel_pos
A__ = has_spatial_attention_bias
A__ = rel_ad_pos_bins
A__ = max_rel_ad_pos
A__ = text_embed
A__ = visual_embed
A__ = input_size
A__ = num_channels
A__ = patch_size
A__ = classifier_dropout
class A (SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCamelCase : List[str] = version.parse('''1.12''' )
@property
def a_ ( self : int ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def a_ ( self : Optional[int] ) -> float:
"""simple docstring"""
return 1e-5
@property
def a_ ( self : Tuple ) -> int:
"""simple docstring"""
return 12
def a_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A__ = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A__ = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase )
A__ = compute_effective_axis_dimension(
__lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
A__ = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
A__ = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
A__ = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
A__ = dict(
processor(
__lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) )
return inputs
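A minimal usage sketch, assuming the first class above corresponds to transformers' LayoutLMv3Config, as its model_type string suggests:
from transformers import LayoutLMv3Config
config = LayoutLMv3Config(num_hidden_layers=6)  # shrink from the 12-layer default
print(config.model_type)  # "layoutlmv3"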
| 274 | 1 |
'''simple docstring'''
from manim import *
class a ( _lowerCamelCase ):
def A_ ( self : Optional[Any] ):
snake_case_ = Rectangle(height=0.5 , width=0.5 )
snake_case_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case_ = Rectangle(height=0.25 , width=0.25 )
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
snake_case_ = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
snake_case_ = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 )
snake_case_ = Text('''CPU''' , font_size=24 )
snake_case_ = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase_ )
snake_case_ = [mem.copy() for i in range(4 )]
snake_case_ = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
snake_case_ = Text('''GPU''' , font_size=24 )
snake_case_ = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase_ )
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
snake_case_ = Text('''Model''' , font_size=24 )
snake_case_ = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
model.move_to([3, -1.0, 0] )
self.add(lowercase_ )
snake_case_ = []
snake_case_ = []
for i, rect in enumerate(lowercase_ ):
snake_case_ = fill.copy().set_fill(lowercase_ , opacity=0.8 )
target.move_to(lowercase_ )
model_arr.append(lowercase_ )
snake_case_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowercase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(lowercase_ )
self.add(*lowercase_ , *lowercase_ )
snake_case_ = [meta_mem.copy() for i in range(6 )]
snake_case_ = [meta_mem.copy() for i in range(6 )]
snake_case_ = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
snake_case_ = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
snake_case_ = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 )
snake_case_ = Text('''Disk''' , font_size=24 )
snake_case_ = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
disk.move_to([-4, -1.25, 0] )
self.add(lowercase_ , lowercase_ )
snake_case_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case_ = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase_ , lowercase_ )
snake_case_ = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowercase_ )
snake_case_ = MarkupText(
F"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase_ ) )
snake_case_ = Square(0.3 )
input.set_fill(lowercase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , lowercase_ , buff=0.5 )
self.play(Write(lowercase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=lowercase_ , buff=0.02 )
self.play(MoveToTarget(lowercase_ ) )
self.play(FadeOut(lowercase_ ) )
snake_case_ = Arrow(start=lowercase_ , end=lowercase_ , color=lowercase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , lowercase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
snake_case_ = MarkupText(
F"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase_ , run_time=3 ) )
snake_case_ = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02}
self.play(
Write(lowercase_ ) , Circumscribe(model_arr[0] , color=lowercase_ , **lowercase_ ) , Circumscribe(model_cpu_arr[0] , color=lowercase_ , **lowercase_ ) , Circumscribe(gpu_rect[0] , color=lowercase_ , **lowercase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
snake_case_ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , lowercase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
snake_case_ = AnimationGroup(
FadeOut(lowercase_ , run_time=0.5 ) , MoveToTarget(lowercase_ , run_time=0.5 ) , FadeIn(lowercase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(lowercase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
snake_case_ = 0.7
self.play(
Circumscribe(model_arr[i] , **lowercase_ ) , Circumscribe(cpu_left_col_base[i] , **lowercase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowercase_ , **lowercase_ ) , Circumscribe(gpu_rect[0] , color=lowercase_ , **lowercase_ ) , Circumscribe(model_arr[i + 1] , color=lowercase_ , **lowercase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=lowercase_ , **lowercase_ ) , Circumscribe(cpu_left_col_base[-1] , color=lowercase_ , **lowercase_ ) , Circumscribe(gpu_rect[0] , color=lowercase_ , **lowercase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
snake_case_ = a_c
snake_case_ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(lowercase_ ) , FadeOut(lowercase_ , run_time=0.5 ) , )
snake_case_ = MarkupText(F"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase_ , run_time=3 ) , MoveToTarget(lowercase_ ) )
self.wait()
| 72 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a : Optional[int] = logging.get_logger(__name__)
a : Optional[Any] = {'vocab_file': 'spiece.model'}
a : Tuple = {
'vocab_file': {
'bert_for_seq_generation': (
'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
),
}
}
a : Dict = {'bert_for_seq_generation': 512}
class a ( _lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = []
snake_case_ = ["input_ids", "attention_mask"]
def __init__( self : Any , lowercase_ : str , lowercase_ : Optional[Any]="<s>" , lowercase_ : Any="</s>" , lowercase_ : Optional[Any]="<unk>" , lowercase_ : List[Any]="<pad>" , lowercase_ : List[str]="<::::>" , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : Optional[int] , ):
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , sep_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
snake_case_ = vocab_file
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase_ )
@property
def A_ ( self : int ):
return self.sp_model.get_piece_size()
def A_ ( self : Union[str, Any] ):
snake_case_ = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ):
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self : Any , lowercase_ : Optional[int] ):
self.__dict__ = lowercase_ # restore the pickled state
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A_ ( self : Any , lowercase_ : str ):
return self.sp_model.encode(lowercase_ , out_type=lowercase_ )
def A_ ( self : Optional[int] , lowercase_ : Union[str, Any] ):
return self.sp_model.piece_to_id(lowercase_ )
def A_ ( self : Dict , lowercase_ : str ):
snake_case_ = self.sp_model.IdToPiece(lowercase_ )
return token
def A_ ( self : Optional[int] , lowercase_ : List[Any] ):
snake_case_ = []
snake_case_ = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowercase_ ) + token
snake_case_ = []
else:
current_sub_tokens.append(lowercase_ )
out_string += self.sp_model.decode(lowercase_ )
return out_string.strip()
def A_ ( self : List[str] , lowercase_ : str , lowercase_ : Optional[str] = None ):
if not os.path.isdir(lowercase_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
snake_case_ = os.path.join(
lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ , '''wb''' ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
| 72 | 1 |
from __future__ import annotations
import random
# Maximum size of the population. A bigger population can be faster but uses more memory.
__A = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__A = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__A = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : str ) -> tuple[str, float]:
"""simple docstring"""
__lowerCamelCase = len([g for position, g in enumerate(UpperCamelCase__ ) if g == main_target[position]] )
return (item, float(UpperCamelCase__ ))
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : str ) -> tuple[str, str]:
"""simple docstring"""
__lowerCamelCase = random.randint(0 , len(UpperCamelCase__ ) - 1 )
__lowerCamelCase = parent_a[:random_slice] + parent_a[random_slice:]
__lowerCamelCase = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : list[str] ) -> str:
"""simple docstring"""
__lowerCamelCase = list(UpperCamelCase__ )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
__lowerCamelCase = random.choice(UpperCamelCase__ )
return "".join(UpperCamelCase__ )
def lowerCamelCase_ ( UpperCamelCase__ : tuple[str, float] , UpperCamelCase__ : list[tuple[str, float]] , UpperCamelCase__ : list[str] , ) -> list[str]:
"""simple docstring"""
__lowerCamelCase = []
# Generate more children proportionally to the fitness score.
__lowerCamelCase = int(parent_a[1] * 100 ) + 1
__lowerCamelCase = 10 if child_n >= 10 else child_n
for _ in range(UpperCamelCase__ ):
__lowerCamelCase = population_score[random.randint(0 , UpperCamelCase__ )][0]
__lowerCamelCase , __lowerCamelCase = crossover(parent_a[0] , UpperCamelCase__ )
# Append new string to the population list.
pop.append(mutate(UpperCamelCase__ , UpperCamelCase__ ) )
pop.append(mutate(UpperCamelCase__ , UpperCamelCase__ ) )
return pop
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : list[str] , UpperCamelCase__ : bool = True ) -> tuple[int, int, str]:
"""simple docstring"""
if N_POPULATION < N_SELECTED:
__lowerCamelCase = F"""{N_POPULATION} must be bigger than {N_SELECTED}"""
raise ValueError(UpperCamelCase__ )
# Verify that the target contains no genes besides the ones inside genes variable.
__lowerCamelCase = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
__lowerCamelCase = F"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
raise ValueError(UpperCamelCase__ )
# Generate random starting population.
__lowerCamelCase = []
for _ in range(UpperCamelCase__ ):
population.append(''.join([random.choice(UpperCamelCase__ ) for i in range(len(UpperCamelCase__ ) )] ) )
# Just some logs to know what the algorithm is doing.
__lowerCamelCase , __lowerCamelCase = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(UpperCamelCase__ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
__lowerCamelCase = [evaluate(UpperCamelCase__ , UpperCamelCase__ ) for item in population]
# Check if there is a matching evolution.
__lowerCamelCase = sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x[1] , reverse=UpperCamelCase__ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"""\nGeneration: {generation}"""
F"""\nTotal Population:{total_population}"""
F"""\nBest score: {population_score[0][1]}"""
F"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoids regression of evolution.
__lowerCamelCase = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(UpperCamelCase__ )
# Normalize population score to be between 0 and 1.
__lowerCamelCase = [
(item, score / len(UpperCamelCase__ )) for item, score in population_score
]
# This is selection
for i in range(UpperCamelCase__ ):
population.extend(select(population_score[int(UpperCamelCase__ )] , UpperCamelCase__ , UpperCamelCase__ ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
far fewer generations.
if len(UpperCamelCase__ ) > N_POPULATION:
break
if __name__ == "__main__":
__A = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
__A = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
__A , __A , __A = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
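A quick way to watch the selection pressure at work is to evolve a short target (a hypothetical run; the exact generation count varies with the random seed):
generation, population, answer = basic("Hello World!", genes_list)
print(f"Reached {answer!r} after {generation} generations")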
| 90 |
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
if dst_width < 0 or dst_height < 0:
raise ValueError('Destination width/height should be > 0' )
__lowerCamelCase : Dict = img
__lowerCamelCase : Any = img.shape[1]
__lowerCamelCase : Optional[int] = img.shape[0]
__lowerCamelCase : Dict = dst_width
__lowerCamelCase : str = dst_height
__lowerCamelCase : Dict = self.src_w / self.dst_w
__lowerCamelCase : List[Any] = self.src_h / self.dst_h
__lowerCamelCase : Optional[int] = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 2_55
)
def lowercase_ ( self ) -> List[Any]:
for i in range(self.dst_h ):
for j in range(self.dst_w ):
__lowerCamelCase : Union[str, Any] = self.img[self.get_y(SCREAMING_SNAKE_CASE_ )][self.get_x(SCREAMING_SNAKE_CASE_ )]
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> int:
return int(self.ratio_x * x )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> int:
return int(self.ratio_y * y )
if __name__ == "__main__":
A__ , A__ : Optional[Any] = 800, 600
A__ : List[str] = imread("""image_data/lena.jpg""", 1)
A__ : List[Any] = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
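The double loop in process() performs one Python-level iteration per destination pixel; the same nearest-neighbour mapping can be done in a single vectorized step with NumPy fancy indexing (a sketch with illustrative names, assuming an H x W x 3 image array):
import numpy as np
def resize_nearest(img: np.ndarray, dst_w: int, dst_h: int) -> np.ndarray:
    src_h, src_w = img.shape[:2]
    xs = (np.arange(dst_w) * (src_w / dst_w)).astype(int)  # source column per output column
    ys = (np.arange(dst_h) * (src_h / dst_h)).astype(int)  # source row per output row
    return img[ys[:, None], xs[None, :]]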
| 185 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_a : str= {
"configuration_efficientnet": [
"EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientNetConfig",
"EfficientNetOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Any= ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Any= [
"EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientNetForImageClassification",
"EfficientNetModel",
"EfficientNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
_a : int= _LazyModule(__name__, globals()["__file__"], _import_structure)
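The sys.modules swap on the last line is what makes the laziness work: the package's module object is replaced by a proxy that imports submodules only when an attribute is first accessed. A much-simplified sketch of the idea (hypothetical; the real _LazyModule also handles TYPE_CHECKING re-exports and error reporting):
import importlib
import sys
import types
class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }
    def __getattr__(self, name: str):
        mapping = self.__dict__.get("_symbol_to_module", {})
        if name not in mapping:
            raise AttributeError(name)
        submodule = importlib.import_module("." + mapping[name], self.__name__)
        value = getattr(submodule, name)
        setattr(self, name, value)  # cache so later lookups bypass __getattr__
        return value
# a package __init__ would then end with:
# sys.modules[__name__] = LazyModule(__name__, _import_structure)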
| 367 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
_a : List[Any]= logging.get_logger(__name__)
_a : Any= {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_a : int= {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
_a : Optional[Any]= {
"junnyu/roformer_chinese_small": 1_536,
"junnyu/roformer_chinese_base": 1_536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
_a : str= {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class UpperCamelCase ( lowercase ):
UpperCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
UpperCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : int = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase : Dict = RoFormerTokenizer
def __init__(self : List[Any] , _A : Any=None , _A : int=None , _A : Dict=True , _A : List[Any]="[UNK]" , _A : Tuple="[SEP]" , _A : List[Any]="[PAD]" , _A : str="[CLS]" , _A : int="[MASK]" , _A : Optional[int]=True , _A : List[str]=None , **_A : int , ) -> Dict:
super().__init__(
_A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , )
__snake_case : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
pre_tok_state.get('lowercase' , _A) != do_lower_case
or pre_tok_state.get('strip_accents' , _A) != strip_accents
):
__snake_case : Union[str, Any] = getattr(_A , pre_tok_state.pop('type'))
__snake_case : Union[str, Any] = do_lower_case
__snake_case : str = strip_accents
__snake_case : Optional[int] = pre_tok_class(**_A)
__snake_case : int = do_lower_case
def __getstate__(self : Optional[Any]) -> Dict:
__snake_case : Optional[int] = self.__dict__.copy()
__snake_case : int = BertPreTokenizer()
return state
def __setstate__(self : Optional[Any] , _A : Optional[Any]) -> Dict:
self.__dict__ = _A # restore the pickled state
__snake_case : str = self.__dict__['_tokenizer'].get_vocab()
__snake_case : int = PreTokenizer.custom(JiebaPreTokenizer(_A))
def _lowercase (self : int , _A : Tuple , _A : Any=None) -> str:
__snake_case : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase (self : List[str] , _A : List[int] , _A : Optional[List[int]] = None) -> List[int]:
__snake_case : Tuple = [self.sep_token_id]
__snake_case : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def _lowercase (self : List[Any] , _A : str , _A : Optional[str] = None) -> Tuple[str]:
__snake_case : List[Any] = self._tokenizer.model.save(_A , name=_A)
return tuple(_A)
def _lowercase (self : int , _A : Optional[int] , _A : Tuple=None , _A : Tuple=None , _A : Dict=False , **_A : Optional[int] , ) -> Optional[Any]:
__snake_case : Optional[Any] = BertPreTokenizer()
return super().save_pretrained(_A , _A , _A , _A , **_A)
| 95 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A ( _SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : Any =(DDIMParallelScheduler,)
UpperCamelCase__ : int =(('eta', 0.0), ('num_inference_steps', 50))
def lowerCamelCase ( self : str , **lowercase_ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Tuple ={
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**lowercase_ )
return config
def lowerCamelCase ( self : str , **lowercase_ : List[str] ) -> str:
"""simple docstring"""
_lowerCamelCase : Dict =self.scheduler_classes[0]
_lowerCamelCase : Optional[int] =self.get_scheduler_config(**lowercase_ )
_lowerCamelCase : Dict =scheduler_class(**lowercase_ )
_lowerCamelCase , _lowerCamelCase : Tuple =10, 0.0
_lowerCamelCase : Dict =self.dummy_model()
_lowerCamelCase : Tuple =self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
for t in scheduler.timesteps:
_lowerCamelCase : int =model(lowercase_ , lowercase_ )
_lowerCamelCase : Optional[Any] =scheduler.step(lowercase_ , lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def lowerCamelCase ( self : str ) -> str:
"""simple docstring"""
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def lowerCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase_ )
_lowerCamelCase : int =self.scheduler_classes[0]
_lowerCamelCase : str =self.get_scheduler_config(steps_offset=1 )
_lowerCamelCase : Optional[int] =scheduler_class(**lowercase_ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def lowerCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def lowerCamelCase ( self : Any ) -> int:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def lowerCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def lowerCamelCase ( self : int ) -> Dict:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase_ )
def lowerCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowercase_ )
def lowerCamelCase ( self : int ) -> List[str]:
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowercase_ )
def lowerCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
self.check_over_configs(thresholding=lowercase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowercase_ , prediction_type=lowercase_ , sample_max_value=lowercase_ , )
def lowerCamelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowercase_ )
def lowerCamelCase ( self : List[str] ) -> int:
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=lowercase_ , num_inference_steps=lowercase_ )
def lowerCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=lowercase_ , eta=lowercase_ )
def lowerCamelCase ( self : int ) -> List[str]:
"""simple docstring"""
_lowerCamelCase : Dict =self.scheduler_classes[0]
_lowerCamelCase : Optional[int] =self.get_scheduler_config()
_lowerCamelCase : Optional[int] =scheduler_class(**lowercase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
def lowerCamelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
_lowerCamelCase : Any =self.scheduler_classes[0]
_lowerCamelCase : List[str] =self.get_scheduler_config()
_lowerCamelCase : Tuple =scheduler_class(**lowercase_ )
_lowerCamelCase , _lowerCamelCase : List[Any] =10, 0.0
scheduler.set_timesteps(lowercase_ )
_lowerCamelCase : Optional[Any] =self.dummy_model()
_lowerCamelCase : Dict =self.dummy_sample_deter
_lowerCamelCase : Union[str, Any] =self.dummy_sample_deter + 0.1
_lowerCamelCase : Tuple =self.dummy_sample_deter - 0.1
_lowerCamelCase : List[Any] =samplea.shape[0]
_lowerCamelCase : Union[str, Any] =torch.stack([samplea, samplea, samplea] , dim=0 )
_lowerCamelCase : List[Any] =torch.arange(lowercase_ )[0:3, None].repeat(1 , lowercase_ )
_lowerCamelCase : Any =model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_lowerCamelCase : int =scheduler.batch_step_no_noise(lowercase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , lowercase_ )
_lowerCamelCase : List[str] =torch.sum(torch.abs(lowercase_ ) )
_lowerCamelCase : Optional[Any] =torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1147.7904 ) < 1E-2
assert abs(result_mean.item() - 0.4982 ) < 1E-3
def lowerCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase : int =self.full_loop()
_lowerCamelCase : List[str] =torch.sum(torch.abs(lowercase_ ) )
_lowerCamelCase : Tuple =torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 172.0067 ) < 1E-2
assert abs(result_mean.item() - 0.223967 ) < 1E-3
def lowerCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] =self.full_loop(prediction_type='v_prediction' )
_lowerCamelCase : Any =torch.sum(torch.abs(lowercase_ ) )
_lowerCamelCase : Tuple =torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 52.5302 ) < 1E-2
assert abs(result_mean.item() - 0.0684 ) < 1E-3
def lowerCamelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
_lowerCamelCase : int =self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
_lowerCamelCase : Optional[Any] =torch.sum(torch.abs(lowercase_ ) )
_lowerCamelCase : List[Any] =torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 149.8295 ) < 1E-2
assert abs(result_mean.item() - 0.1951 ) < 1E-3
def lowerCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] =self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
_lowerCamelCase : List[Any] =torch.sum(torch.abs(lowercase_ ) )
_lowerCamelCase : Tuple =torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 149.0784 ) < 1E-2
assert abs(result_mean.item() - 0.1941 ) < 1E-3
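These cases follow the diffusers SchedulerCommonTest pattern (dummy_model, dummy_sample_deter, check_over_configs). A hypothetical way to run just this file, assuming it lives at the usual tests path:
python -m pytest tests/schedulers/test_scheduler_ddim_parallel.py -v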
| 199 |
'''simple docstring'''
import sys
a_ : Dict = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def _A (lowerCAmelCase__ :str = N ) -> int:
'''simple docstring'''
_a = -sys.maxsize - 1
for i in range(len(lowerCAmelCase__ ) - 12 ):
_a = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
_a = product
return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
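The inner loop above recomputes the full 13-digit product for every window. A sketch of a variant that skips past zeros, since any window containing a zero contributes nothing (names illustrative):
def largest_product(digits: str, span: int = 13) -> int:
    best = 0
    i = 0
    while i + span <= len(digits):
        window = digits[i : i + span]
        zero = window.rfind("0")
        if zero != -1:
            i += zero + 1  # every window overlapping this zero has product 0
            continue
        product = 1
        for ch in window:
            product *= int(ch)
        best = max(best, product)
        i += 1
    return best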
| 168 | 0 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
UpperCAmelCase : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Tuple:
'''simple docstring'''
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
lowercase_ = model_type_to_module_name(__lowerCAmelCase )
lowercase_ = importlib.import_module(F'''.{module_name}''' , """transformers.models""" )
try:
return getattr(__lowerCAmelCase , __lowerCAmelCase )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(__lowerCAmelCase , """__name__""" , __lowerCAmelCase ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
lowercase_ = importlib.import_module("""transformers""" )
if hasattr(__lowerCAmelCase , __lowerCAmelCase ):
return getattr(__lowerCAmelCase , __lowerCAmelCase )
return None
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = False , **__lowerCAmelCase , ) -> int:
'''simple docstring'''
lowercase_ = get_file_from_repo(
__lowerCAmelCase , __lowerCAmelCase , cache_dir=__lowerCAmelCase , force_download=__lowerCAmelCase , resume_download=__lowerCAmelCase , proxies=__lowerCAmelCase , use_auth_token=__lowerCAmelCase , revision=__lowerCAmelCase , local_files_only=__lowerCAmelCase , )
if resolved_config_file is None:
logger.info(
"""Could not locate the feature extractor configuration file, will try to use the model config instead.""" )
return {}
with open(__lowerCAmelCase , encoding="""utf-8""" ) as reader:
return json.load(__lowerCAmelCase )
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any):
"""simple docstring"""
raise EnvironmentError(
"""AutoFeatureExtractor is designed to be instantiated """
"""using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""")
@classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop('config', None)
        trust_remote_code = kwargs.pop('trust_remote_code', None)
        kwargs['_from_auto'] = True
        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get('feature_extractor_type', None)
        feature_extractor_auto_map = None
        if 'AutoFeatureExtractor' in config_dict.get('auto_map', {}):
            feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, 'feature_extractor_type', None)
            if hasattr(config, 'auto_map') and 'AutoFeatureExtractor' in config.auto_map:
                feature_extractor_auto_map = config.auto_map['AutoFeatureExtractor']
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)
        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop('code_revision', None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}")
    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor class for a given config class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
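# Usage sketch (illustrative, not part of the original module): the Auto API picks
# the right extractor class from a checkpoint's stored config. The checkpoint id
# below is an assumed example, and loading it requires network access.
#
#     extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#     features = extractor(raw_audio, sampling_rate=16_000, return_tensors="pt")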
| 313 |
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum subarray sum, computed with Kadane's algorithm."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
| 313 | 1 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode='linear_ramp', pad_width=overlap_pixels, end_values=0)
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect, min, max):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )
def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new('RGB', (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])), (0, 0), )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def round_down_to_multiple(n: int, d: int) -> int:
    # Drop the remainder so the result is the largest multiple of ``d`` not above ``n``.
    divisor = n % d
    return n - divisor
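# Worked example (illustrative): round_down_to_multiple(300, 128) drops the
# remainder 300 % 128 == 44 and returns 256, the largest multiple of 128 in 300.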
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
        super().__init__(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, max_noise_level=max_noise_level, )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        # Horizontal position of the original-image slice inside this tile.
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append('l')
        elif crop_rect[2] == image.size[0]:
            remove_borders.append('r')
        if y == 0:
            remove_borders.append('t')
        elif crop_rect[3] == image.size[1]:
            remove_borders.append('b')
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders), mode='L', )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask)
    @torch.no_grad()
    def __call__( self, prompt, image, num_inference_steps=75, guidance_scale=9.0, noise_level=50, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, callback=None, callback_steps=1, tile_size=128, tile_border=32, original_image_slice=32, ):
        final_image = Image.new('RGB', (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice, x, y, tile_size, tile_border, image, final_image,
                    prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale,
                    noise_level=noise_level, negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, )
                current_count += 1
                if callback is not None:
                    callback({'progress': current_count / total_tile_count, 'image': final_image})
        return final_image
def main():
    model_id = 'stabilityai/stable-diffusion-x4-upscaler'
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision='fp16', torch_dtype=torch.float16)
    pipe = pipe.to('cuda')
    image = Image.open('../../docs/source/imgs/diffusers_library.jpg')

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj['image'].save('diffusers_library_progress.jpg')

    final_image = pipe(image=image, prompt='Black font, white background, vector', noise_level=40, callback=callback)
    final_image.save('diffusers_library.jpg')


if __name__ == "__main__":
    main()
| 240 |
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """
    Return how many tile counts t <= t_limit can be arranged as at least one and
    at most n_limit distinct hollow square laminae (a lamina of outer width o and
    hole width h uses o * o - h * h tiles; o and h share parity and h <= o - 2).
    """
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
| 200 | 0 |
'''simple docstring'''
def is_even(number: int) -> bool:
    """
    Return True when ``number`` is even, tested with a bitwise AND against 1.

    >>> is_even(10)
    True
    >>> is_even(7)
    False
    """
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
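    # Quick illustrative spot-check on top of the doctests above.
    assert is_even(0) and is_even(-4) and not is_even(7)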
| 170 |
'''simple docstring'''
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """
    Apply tanh elementwise via its exponential form: tanh(x) = 2 / (1 + e^(-2x)) - 1.

    >>> tangent_hyperbolic(np.array([0.0]))
    array([0.])
    """
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
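    # Sanity check (illustrative, not in the original): the closed form agrees
    # with numpy's built-in tanh across a small grid.
    xs = np.linspace(-3.0, 3.0, 7)
    assert np.allclose(tangent_hyperbolic(xs), np.tanh(xs))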
| 170 | 1 |
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
lowercase_ = logging.getLogger(__name__)
class RayRetriever:
    """Wrapper that lets a Ray actor lazily build and query a RagRetriever."""

    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    """RagRetriever variant that delegates retrieval to a pool of Ray actors."""

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py " )
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ] )
    def init_retrieval(self):
        logger.info('initializing retrieval')
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop('config', None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = 'custom'
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, retrieval_workers=actor_handles, index=index, )
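# Usage sketch (illustrative; assumes a running Ray cluster and the public
# "facebook/rag-token-nq" checkpoint, neither of which is stated in this file):
#
#     ray.init()
#     workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#     retriever = RagRayDistributedRetriever.from_pretrained("facebook/rag-token-nq", workers)
#     retriever.init_retrieval()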
| 45 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Build a randomly initialized model from ``config_name`` and save it with its tokenizer."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
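# Example CLI use via fire (illustrative; the script filename and the config
# overrides below are placeholders, not taken from this file):
#   python save_randomly_initialized_model.py t5-small ./tiny-t5 --d_model=64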
| 170 | 0 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url(repo_id: str, path: str, revision: str) -> None:
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
| 370 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
_UpperCAmelCase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
_UpperCAmelCase : Optional[str] = field(
default=UpperCAmelCase ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_UpperCAmelCase : Optional[str] = field(
default="NER" ,metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} )
_UpperCAmelCase : Optional[str] = field(
default=UpperCAmelCase ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
_UpperCAmelCase : bool = field(default=UpperCAmelCase ,metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_UpperCAmelCase : Optional[str] = field(
default=UpperCAmelCase ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,)
@dataclass
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
_UpperCAmelCase : str = field(
metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
_UpperCAmelCase : Optional[str] = field(
default=UpperCAmelCase ,metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} ,)
_UpperCAmelCase : int = field(
default=1_2_8 ,metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
_UpperCAmelCase : bool = field(
default=UpperCAmelCase ,metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main() -> None:
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
    module = import_module('tasks')
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map = dict(enumerate(labels))
    num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label=label_map, label2id={label: i for i, label in enumerate(labels)}, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')
        if trainer.is_world_process_zero():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key, value in result.items():
                    logger.info(' %s = %s', key, value)
                    writer.write('%s = %s\n' % (key, value))
        results.update(result)
# Predict
if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, 'test_results.txt')
        if trainer.is_world_process_zero():
            with open(output_test_results_file, 'w') as writer:
                for key, value in metrics.items():
                    logger.info(' %s = %s', key, value)
                    writer.write('%s = %s\n' % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, 'test_predictions.txt')
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, 'w') as writer:
                with open(os.path.join(data_args.data_dir, 'test.txt'), 'r') as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
return results
def _mp_fn(index: int) -> None:
    # For xla_spawn (TPUs)
    main()
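# Example invocation (illustrative; the script name, data directory, and
# hyperparameters below are placeholders, not taken from this file):
#   python run_ner.py --model_name_or_path bert-base-cased --data_dir ./conll2003 \
#       --labels ./conll2003/labels.txt --output_dir ./ner-out \
#       --do_train --do_eval --max_seq_length 128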
if __name__ == "__main__":
    main()
| 130 | 0 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowercase_ = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self : int, _lowerCamelCase : Any, _lowerCamelCase : str=13, _lowerCamelCase : Tuple=7, _lowerCamelCase : int=True, _lowerCamelCase : Any=False, _lowerCamelCase : str=99, _lowerCamelCase : int=32, _lowerCamelCase : Union[str, Any]=5, _lowerCamelCase : str=4, _lowerCamelCase : str=37, _lowerCamelCase : Optional[int]=0.1, _lowerCamelCase : Dict=0.1, _lowerCamelCase : Optional[int]=20, _lowerCamelCase : Any=2, _lowerCamelCase : Tuple=1, _lowerCamelCase : List[Any]=0, ):
'''simple docstring'''
__A = parent
__A = batch_size
__A = seq_length
__A = is_training
__A = use_labels
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = eos_token_id
__A = pad_token_id
__A = bos_token_id
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
__A = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ).clip(3, self.vocab_size )
__A = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ), 1 )
__A = np.concatenate([input_ids, eos_tensor], axis=1 )
__A = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
__A = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
__A = prepare_pegasus_inputs_dict(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : int, _lowerCamelCase : int, _lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__A = 20
__A = model_class_name(_lowerCamelCase )
__A = model.encode(inputs_dict['''input_ids'''] )
__A , __A = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__A = model.init_cache(decoder_input_ids.shape[0], _lowerCamelCase, _lowerCamelCase )
__A = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype='''i4''' )
__A = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
__A = model.decode(
decoder_input_ids[:, :-1], _lowerCamelCase, decoder_attention_mask=_lowerCamelCase, past_key_values=_lowerCamelCase, decoder_position_ids=_lowerCamelCase, )
__A = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='''i4''' )
__A = model.decode(
decoder_input_ids[:, -1:], _lowerCamelCase, decoder_attention_mask=_lowerCamelCase, past_key_values=outputs_cache.past_key_values, decoder_position_ids=_lowerCamelCase, )
__A = model.decode(_lowerCamelCase, _lowerCamelCase )
__A = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3, msg=f'Max diff is {diff}' )
def _SCREAMING_SNAKE_CASE ( self : Tuple, _lowerCamelCase : List[Any], _lowerCamelCase : Union[str, Any], _lowerCamelCase : List[str] ):
'''simple docstring'''
__A = 20
__A = model_class_name(_lowerCamelCase )
__A = model.encode(inputs_dict['''input_ids'''] )
__A , __A = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__A = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
], axis=-1, )
__A = model.init_cache(decoder_input_ids.shape[0], _lowerCamelCase, _lowerCamelCase )
__A = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
__A = model.decode(
decoder_input_ids[:, :-1], _lowerCamelCase, decoder_attention_mask=_lowerCamelCase, past_key_values=_lowerCamelCase, decoder_position_ids=_lowerCamelCase, )
__A = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='''i4''' )
__A = model.decode(
decoder_input_ids[:, -1:], _lowerCamelCase, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=_lowerCamelCase, decoder_position_ids=_lowerCamelCase, )
__A = model.decode(_lowerCamelCase, _lowerCamelCase, decoder_attention_mask=_lowerCamelCase )
__A = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3, msg=f'Max diff is {diff}' )
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, ):
    """Build the model inputs, deriving masks from the pad token where not given."""
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ], axis=-1, )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__A = self._prepare_for_class(_lowerCamelCase, _lowerCamelCase )
__A = model_class(_lowerCamelCase )
@jax.jit
def encode_jitted(_lowerCamelCase : Optional[int], _lowerCamelCase : Optional[int]=None, **_lowerCamelCase : Dict ):
return model.encode(input_ids=_lowerCamelCase, attention_mask=_lowerCamelCase )
with self.subTest('''JIT Enabled''' ):
__A = encode_jitted(**_lowerCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__A = encode_jitted(**_lowerCamelCase ).to_tuple()
self.assertEqual(len(_lowerCamelCase ), len(_lowerCamelCase ) )
for jitted_output, output in zip(_lowerCamelCase, _lowerCamelCase ):
self.assertEqual(jitted_output.shape, output.shape )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__A = model_class(_lowerCamelCase )
__A = model.encode(inputs_dict['''input_ids'''], inputs_dict['''attention_mask'''] )
__A = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(_lowerCamelCase : int, _lowerCamelCase : List[str], _lowerCamelCase : Optional[Any] ):
return model.decode(
decoder_input_ids=_lowerCamelCase, decoder_attention_mask=_lowerCamelCase, encoder_outputs=_lowerCamelCase, )
with self.subTest('''JIT Enabled''' ):
__A = decode_jitted(**_lowerCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__A = decode_jitted(**_lowerCamelCase ).to_tuple()
self.assertEqual(len(_lowerCamelCase ), len(_lowerCamelCase ) )
for jitted_output, output in zip(_lowerCamelCase, _lowerCamelCase ):
self.assertEqual(jitted_output.shape, output.shape )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__A = model_class_name.from_pretrained('''google/pegasus-large''', from_pt=_lowerCamelCase )
__A = np.ones((1, 1) )
__A = model(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
__A = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' )
__A = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' )
__A = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
__A = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
__A = tokenizer(_lowerCamelCase, return_tensors='''np''', truncation=_lowerCamelCase, max_length=5_12, padding=_lowerCamelCase )
__A = model.generate(**_lowerCamelCase, num_beams=2 ).sequences
__A = tokenizer.batch_decode(_lowerCamelCase, skip_special_tokens=_lowerCamelCase )
assert tgt_text == decoded
| 266 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowercase_ = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 266 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
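# Note (illustrative): with the _LazyModule pattern above, the heavy backend
# submodules (torch, TF, and flax model files) are only imported on first
# attribute access, so `from transformers import XLMRobertaConfig` stays cheap
# even when all three backends are installed.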
| 189 |
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        # Insert the new node at the head of the list.
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        # Swap the payloads of the first nodes holding each value.
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print('After swapping')
    ll.print_list()
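    # Expected output of the demo above (illustrative):
    #   1 2 3 4 5
    #   After swapping
    #   4 2 3 1 5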
| 189 | 1 |
__lowercase = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
| 43 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BlipImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
def __call__(self , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
A_ : Optional[Any] = self.tokenizer
A_ : Tuple = self.tokenizer(
text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
return text_encoding
# add pixel_values
A_ : int = self.image_processor(lowercase , return_tensors=lowercase )
if text is not None:
A_ : Optional[Any] = self.tokenizer(
text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
else:
A_ : List[str] = None
if text_encoding is not None:
encoding_image_processor.update(lowercase )
return encoding_image_processor
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def _a (self ):
A_ : int = self.tokenizer.model_input_names
A_ : Any = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 206 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCamelCase_ = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[Any]=7 , __UpperCAmelCase : Dict=3 , __UpperCAmelCase : List[str]=18 , __UpperCAmelCase : Union[str, Any]=30 , __UpperCAmelCase : Union[str, Any]=400 , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : str=True , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : Union[str, Any]=None , ):
'''simple docstring'''
_A = size if size is not None else {"height": 20, "width": 20}
_A = parent
_A = batch_size
_A = num_channels
_A = image_size
_A = min_resolution
_A = max_resolution
_A = size
_A = do_normalize
_A = do_convert_rgb
_A = [512, 1024, 2048, 4096]
_A = patch_size if patch_size is not None else {"height": 16, "width": 16}
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
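# Shape note (illustrative): Pix2Struct flattens each patch into
# patch_h * patch_w * channels pixel values plus 2 positional entries (row and
# column index), so a 16x16 patch over 3 channels yields 16 * 16 * 3 + 2 = 770
# features; this is the `expected_hidden_dim` computed in the tests below.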
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class _UpperCAmelCase ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
snake_case = PixaStructImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self : int ):
'''simple docstring'''
_A = PixaStructImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "do_convert_rgb" ) )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = self.image_processor_tester.prepare_dummy_image()
_A = self.image_processing_class(**self.image_processor_dict )
_A = 2048
_A = image_processor(__UpperCAmelCase , return_tensors="pt" , max_patches=__UpperCAmelCase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
_A = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_A = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
__UpperCAmelCase , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
_A = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
_A = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(__UpperCAmelCase ):
_A = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
_A = "Hello"
_A = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__UpperCAmelCase , header_text=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
__UpperCAmelCase , return_tensors="pt" , max_patches=__UpperCAmelCase , header_text=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
_A = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_A = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
__UpperCAmelCase , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
_A = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_A = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
__UpperCAmelCase , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class _UpperCAmelCase ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
snake_case = PixaStructImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = PixaStructImageProcessingTester(self , num_channels=4 )
_A = 3
@property
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : int ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "do_convert_rgb" ) )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
_A = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_A = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
__UpperCAmelCase , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 174 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 174 | 1 |
from __future__ import annotations

import os
from typing import Any

import requests

BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """
    Fetch GitHub info of a user using the requests module
    """
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
| 146 |
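For context, a small sketch of what the helper above sends and receives; the token is a placeholder and no request is issued here:

# Hypothetical header payload, mirroring fetch_github_info's construction:
demo_headers = {
    "Authorization": "token ghp_example_placeholder",  # placeholder, not a real token
    "Accept": "application/vnd.github.v3+json",
}
# requests.get(AUTHENTICATED_USER_ENDPOINT, headers=demo_headers).json() would return the
# authenticated user's profile as a dict, e.g. {"login": "...", "id": 123, ...}.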
import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
| 229 | 0 |
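A quick, self-contained sanity check of the synthetic regression data defined above; the residual bound is an assumption chosen loosely against the 0.1 noise scale:

ds = RegressionDataset(a=2, b=3, length=64, seed=42)
sample = ds[0]
# y was generated as 2*x + 3 plus N(0, 0.1) noise, so the residual should be small.
assert abs(sample["y"] - (2 * sample["x"] + 3)) < 1.0
print(len(ds), sample["x"].dtype)  # 64 float32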
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
| 360 |
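The key relation behind generate_key can be checked with toy numbers: e_2 is the modular inverse of e_1**d mod p, so their product is 1 modulo p. A hand-sized sketch (all parameters are illustrative and far too small for real use):

p = 2027  # small prime, illustration only
e_1 = 2   # stand-in for a primitive root
d = 701   # stand-in private exponent
e_2 = pow(pow(e_1, d, p), -1, p)  # same role as cryptomath.find_mod_inverse (Python 3.8+)
assert (pow(e_1, d, p) * e_2) % p == 1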
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel

from ...utils import logging

logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
| 256 | 0 |
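A rough instantiation sketch for the checker above; the default CLIPConfig and random tensors are stand-ins for a real pipeline's config and CLIP-preprocessed images:

import torch
from transformers import CLIPConfig

config = CLIPConfig()  # randomly initialized, illustration only
checker = IFSafetyChecker(config)
images = torch.rand(2, 3, 64, 64).numpy()  # stand-in for decoded pipeline images
clip_input = torch.rand(2, 3, 224, 224)    # stand-in for CLIP-preprocessed pixels
images, nsfw, watermark = checker(clip_input, images)
print(nsfw, watermark)  # one boolean per image in each list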
import shutil
import tempfile
import unittest
from unittest.mock import patch

from transformers import (
    DefaultFlowCallback,
    IntervalStrategy,
    PrinterCallback,
    ProgressCallback,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    is_torch_available,
)
from transformers.testing_utils import require_torch


if is_torch_available():
    from transformers.trainer import DEFAULT_CALLBACKS

    from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel


class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 42 |
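Outside the test harness the same hooks plug into a real Trainer; a minimal sketch (the callback name and printed format are ad hoc, not taken from the test file):

from transformers import TrainerCallback

class PrintLossCallback(TrainerCallback):
    # on_log receives the scalar metrics via `logs`.
    def on_log(self, args, state, control, logs=None, **kwargs):
        if logs and "loss" in logs:
            print(f"step {state.global_step}: loss={logs['loss']:.4f}")

# trainer = Trainer(model, args, train_dataset=..., callbacks=[PrintLossCallback()])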
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
        self.thread_ts = None  # set by post(); post_reply() requires it

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        # NOTE: reads the module-level doc_test_results populated in __main__, as in the original script.
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""

        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
UpperCAmelCase = get_job_links()
UpperCAmelCase = retrieve_available_artifacts()
UpperCAmelCase = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
UpperCAmelCase = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
UpperCAmelCase = github_actions_job_links.get('''run_doctests''')
UpperCAmelCase = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
UpperCAmelCase = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = handle_test_results(artifact['''stats'''])
UpperCAmelCase = failed
UpperCAmelCase = success
UpperCAmelCase = time_spent[1:-1] + ''', '''
UpperCAmelCase = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
UpperCAmelCase = line.replace('''FAILED ''', '''''')
UpperCAmelCase = line.split()[0].replace('''\n''', '''''')
if "::" in line:
UpperCAmelCase , UpperCAmelCase = line.split('''::''')
else:
UpperCAmelCase , UpperCAmelCase = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
UpperCAmelCase = docs[file_regex]
doc_test_results[category]["failed"].append(test)
UpperCAmelCase = all_failures[test] if test in all_failures else '''N/A'''
UpperCAmelCase = failure
break
UpperCAmelCase = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 195 | 0 |
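handle_test_results leans on pytest's summary format, counting the token just before each 'failed'/'passed'; a worked example on a made-up summary line:

sample = "=== 2 failed, 130 passed, 4 skipped in 32.12s ==="
failed, success, time_spent = handle_test_results(sample)
print(failed, success, time_spent)  # 2 130 32.12s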
deps = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 357 |
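The table above mirrors transformers' dependency_versions_table.py: setup.py is the source of truth, and runtime version checks read their pins from this dict. A lookup sketch:

# Resolve the pin recorded for a given package:
print(deps["tokenizers"])  # tokenizers>=0.11.1,!=0.11.3,<0.14
print(deps["torch"])       # torch>=1.9,!=1.12.0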
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
| 159 | 0 |
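Note that gen_gaussian_kernel does not normalize the kernel to unit sum (and its prefactor omits the sigma**2 of a true 2D Gaussian), so filtered intensities are slightly dimmed before the uint8 cast. A quick check, assuming k_size=3 and sigma=1:

kernel = gen_gaussian_kernel(3, 1)
print(kernel.shape)  # (3, 3)
print(kernel.sum())  # roughly 0.78 here, i.e. not normalized to 1.0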
import math
from typing import Optional

import numpy as np

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
| 101 |
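With the defaults, the derived properties work out as follows: the upsampling ratios [8, 5, 4, 2] multiply to a hop length of 320 samples, so frame_rate = ceil(24000 / 320) = 75 and num_quantizers = 1000 * 24.0 // 750 = 32. A sketch:

config = EncodecConfig()  # 24 kHz defaults
print(config.frame_rate)      # 75
print(config.num_quantizers)  # 32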
def dodecahedron_surface_area(edge: int) -> float:
    """Calculates the surface area of a regular dodecahedron."""
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: int) -> float:
    """Calculates the volume of a regular dodecahedron."""
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 101 | 1 |
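A worked example for the two closed forms, using edge = 1 so the constants show through:

print(dodecahedron_surface_area(1))  # 3*sqrt(25 + 10*sqrt(5)), roughly 20.6457
print(dodecahedron_volume(1))        # (15 + 7*sqrt(5))/4, roughly 7.6631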
from __future__ import annotations

from math import pi


def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 357 |
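Every branch is a rearrangement of X_L = 2*pi*f*L, solved for whichever quantity was passed as 0; a worked round trip:

print(ind_reactance(35e-3, 1000, 0))             # {'reactance': 219.911...}
print(ind_reactance(0, 1000, 219.911485751286))  # {'inductance': 0.035...}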
from dataclasses import dataclass, field
from typing import Optional

from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser


@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )


def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)


if __name__ == "__main__":
    main()
| 118 | 0 |
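The script is meant to be run once to stitch a vision encoder to a text decoder before captioning training; a usage sketch with illustrative checkpoint names and file name:

# python create_model_from_encoder_decoder_models.py \
#     --output_dir ./vit-gpt2 \
#     --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#     --decoder_model_name_or_path gpt2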