code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A = {
'''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Swinv2ForImageClassification''',
'''Swinv2ForMaskedImageModeling''',
'''Swinv2Model''',
'''Swinv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 122 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 201 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def snake_case_ ( snake_case ) -> Union[str, Any]:
lowercase__: str = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
lowercase__: Optional[Any] = True if "large" in model_name or "huge" in model_name else False
lowercase__: int = True if "large" in model_name or "huge" in model_name else False
lowercase__: List[str] = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowercase__: Any = [3, 3, 3, 3]
lowercase__: Any = [5, 5, 5, 5]
elif "fl4" in model_name:
lowercase__: Tuple = [4, 4, 4, 4]
lowercase__: Tuple = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowercase__: Dict = [3, 3, 3, 3]
if "lrf" in model_name:
lowercase__: Tuple = [3, 3, 3, 3]
else:
lowercase__: Dict = [2, 2, 2, 2]
if "tiny" in model_name:
lowercase__: Tuple = 96
elif "small" in model_name:
lowercase__: Union[str, Any] = 96
elif "base" in model_name:
lowercase__: Tuple = 1_28
elif "large" in model_name:
lowercase__: Union[str, Any] = 1_92
elif "xlarge" in model_name:
lowercase__: Optional[Any] = 2_56
elif "huge" in model_name:
lowercase__: List[str] = 3_52
# set label information
lowercase__: Any = "huggingface/label-files"
if "large" in model_name or "huge" in model_name:
lowercase__: List[str] = "imagenet-22k-id2label.json"
else:
lowercase__: int = "imagenet-1k-id2label.json"
lowercase__: Dict = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='dataset' ) , 'r' ) )
lowercase__: List[Any] = {int(__snake_case ): v for k, v in idalabel.items()}
lowercase__: Optional[int] = {v: k for k, v in idalabel.items()}
lowercase__: Optional[int] = FocalNetConfig(
embed_dim=__snake_case , depths=__snake_case , focal_levels=__snake_case , focal_windows=__snake_case , use_conv_embed=__snake_case , idalabel=__snake_case , labelaid=__snake_case , use_post_layernorm=__snake_case , use_layerscale=__snake_case , )
return config
def snake_case_ ( snake_case ) -> Optional[Any]:
if "patch_embed.proj" in name:
lowercase__: List[Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
lowercase__: int = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
lowercase__: List[Any] = "encoder." + name
if "encoder.layers" in name:
lowercase__: Any = name.replace('encoder.layers' , 'encoder.stages' )
if "downsample.proj" in name:
lowercase__: Dict = name.replace('downsample.proj' , 'downsample.projection' )
if "blocks" in name:
lowercase__: int = name.replace('blocks' , 'layers' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowercase__: str = name.replace('modulation.f' , 'modulation.projection_in' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowercase__: Optional[Any] = name.replace('modulation.h' , 'modulation.projection_context' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowercase__: Any = name.replace('modulation.proj' , 'modulation.projection_out' )
if name == "norm.weight":
lowercase__: int = "layernorm.weight"
if name == "norm.bias":
lowercase__: List[Any] = "layernorm.bias"
if "head" in name:
lowercase__: Any = name.replace('head' , 'classifier' )
else:
lowercase__: Tuple = "focalnet." + name
return name
def snake_case_ ( snake_case , snake_case , snake_case=False ) -> List[Any]:
# fmt: off
lowercase__: int = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
lowercase__: Optional[int] = model_name_to_url[model_name]
print('Checkpoint URL: ' , __snake_case )
lowercase__: List[Any] = torch.hub.load_state_dict_from_url(__snake_case , map_location='cpu' )["model"]
# rename keys
for key in state_dict.copy().keys():
lowercase__: str = state_dict.pop(__snake_case )
lowercase__: Tuple = val
lowercase__: Union[str, Any] = get_focalnet_config(__snake_case )
lowercase__: Any = FocalNetForImageClassification(__snake_case )
model.eval()
# load state dict
model.load_state_dict(__snake_case )
# verify conversion
lowercase__: Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__: str = BitImageProcessor(
do_resize=__snake_case , size={'shortest_edge': 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=__snake_case , crop_size=2_24 , do_normalize=__snake_case , image_mean=__snake_case , image_std=__snake_case , )
lowercase__: Dict = Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
lowercase__: Optional[Any] = processor(images=__snake_case , return_tensors='pt' )
lowercase__: Optional[int] = transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
lowercase__: Optional[Any] = image_transforms(__snake_case ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , __snake_case , atol=1e-4 )
lowercase__: int = model(**__snake_case )
lowercase__: Optional[Any] = outputs.logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
print('First values of logits:' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowercase__: str = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
lowercase__: int = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
lowercase__: List[Any] = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
lowercase__: str = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
lowercase__: List[str] = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
lowercase__: Tuple = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f'Saving model and processor of {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__snake_case )
processor.save_pretrained(__snake_case )
if push_to_hub:
print(f'Pushing model and processor of {model_name} to the hub...' )
model.push_to_hub(f'{model_name}' )
processor.push_to_hub(f'{model_name}' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
__lowerCAmelCase = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 364 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__lowerCAmelCase = logging.get_logger(__name__)
class __a ( __UpperCamelCase ):
__lowercase : Tuple = ['input_values', 'attention_mask']
def __init__( self , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 16_000 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = False , lowerCAmelCase__ = 80 , lowerCAmelCase__ = 16 , lowerCAmelCase__ = 64 , lowerCAmelCase__ = "hann_window" , lowerCAmelCase__ = 1.0 , lowerCAmelCase__ = 80 , lowerCAmelCase__ = 7_600 , lowerCAmelCase__ = 1E-10 , lowerCAmelCase__ = 2 , lowerCAmelCase__ = True , **lowerCAmelCase__ , ) -> List[Any]:
'''simple docstring'''
super().__init__(feature_size=lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , padding_value=lowerCAmelCase__ , **lowerCAmelCase__ )
lowercase__: Dict = do_normalize
lowercase__: Optional[Any] = return_attention_mask
lowercase__: str = num_mel_bins
lowercase__: Dict = hop_length
lowercase__: Dict = win_length
lowercase__: Optional[int] = win_function
lowercase__: Any = frame_signal_scale
lowercase__: Tuple = fmin
lowercase__: Tuple = fmax
lowercase__: Dict = mel_floor
lowercase__: int = reduction_factor
lowercase__: List[Any] = win_length * sampling_rate // 1_000
lowercase__: Optional[Any] = hop_length * sampling_rate // 1_000
lowercase__: Optional[int] = optimal_fft_length(self.sample_size )
lowercase__: Optional[Any] = (self.n_fft // 2) + 1
lowercase__: str = window_function(window_length=self.sample_size , name=self.win_function , periodic=lowerCAmelCase__ )
lowercase__: Optional[Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , lowerCAmelCase__ , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , lowerCAmelCase__ , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
lowercase__: List[str] = np.array(lowerCAmelCase__ , np.intaa )
lowercase__: Tuple = []
for vector, length in zip(lowerCAmelCase__ , attention_mask.sum(-1 ) ):
lowercase__: int = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
lowercase__: Tuple = padding_value
normed_input_values.append(lowerCAmelCase__ )
else:
lowercase__: Union[str, Any] = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , ) -> np.ndarray:
'''simple docstring'''
lowercase__: List[str] = spectrogram(
lowerCAmelCase__ , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
return log_mel_spec.T
def __call__( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> BatchFeature:
'''simple docstring'''
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if audio is not None:
lowercase__: Dict = self._process_audio(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ , )
else:
lowercase__: str = None
if audio_target is not None:
lowercase__: List[str] = self._process_audio(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ , )
if inputs is None:
return inputs_target
else:
lowercase__: int = inputs_target['input_values']
lowercase__: List[str] = inputs_target.get('attention_mask' )
if decoder_attention_mask is not None:
lowercase__: Optional[int] = decoder_attention_mask
return inputs
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> BatchFeature:
'''simple docstring'''
lowercase__: int = isinstance(lowerCAmelCase__ , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
lowercase__: Tuple = is_batched_numpy or (
isinstance(lowerCAmelCase__ , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__: Dict = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(lowerCAmelCase__ , np.ndarray ):
lowercase__: Optional[Any] = np.asarray(lowerCAmelCase__ , dtype=np.floataa )
elif isinstance(lowerCAmelCase__ , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
lowercase__: Optional[Any] = speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__: Optional[int] = [speech]
# needed to make pad() work on spectrogram inputs
lowercase__: str = self.feature_size
# convert into correct format for padding
if is_target:
lowercase__: int = [self._extract_mel_features(lowerCAmelCase__ ) for waveform in speech]
lowercase__: Dict = BatchFeature({'input_values': features} )
lowercase__: Union[str, Any] = self.num_mel_bins
else:
lowercase__: Union[str, Any] = BatchFeature({'input_values': speech} )
lowercase__: Dict = self.pad(
lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
lowercase__: List[str] = feature_size_hack
# convert input values to correct format
lowercase__: Union[str, Any] = padded_inputs['input_values']
if not isinstance(input_values[0] , np.ndarray ):
lowercase__: List[Any] = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(lowerCAmelCase__ , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
lowercase__: Dict = [array.astype(np.floataa ) for array in input_values]
elif isinstance(lowerCAmelCase__ , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
lowercase__: Tuple = input_values.astype(np.floataa )
# convert attention_mask to correct format
lowercase__: Tuple = padded_inputs.get('attention_mask' )
if attention_mask is not None:
lowercase__: str = [np.asarray(lowerCAmelCase__ , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
lowercase__: Tuple = (
attention_mask
if self._get_padding_strategies(lowerCAmelCase__ , max_length=lowerCAmelCase__ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowercase__: str = self.zero_mean_unit_var_norm(
padded_inputs['input_values'] , attention_mask=lowerCAmelCase__ , padding_value=self.padding_value )
if return_tensors is not None:
lowercase__: Union[str, Any] = padded_inputs.convert_to_tensors(lowerCAmelCase__ )
return padded_inputs
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict[str, Any]:
'''simple docstring'''
lowercase__: int = super().to_dict()
# Don't serialize these as they are derived from the other properties.
lowercase__: str = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
for name in names:
if name in output:
del output[name]
return output
| 288 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A : Union[str, Any] = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
__A : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 260 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__a = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 | 0 |
import datasets
from .evaluate import evaluate
__a = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''
__a = '''
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''
__a = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
def __lowerCamelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {'''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.Value('''string''' )},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
lowercase : Optional[int] = [
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
lowercase : str = evaluate(dataset=SCREAMING_SNAKE_CASE__ , predictions=SCREAMING_SNAKE_CASE__ )
return score
| 173 |
from __future__ import annotations
from math import ceil, floor, sqrt
def __lowercase ( _UpperCamelCase = 2000000 ) ->int:
"""simple docstring"""
lowercase : list[int] = [0]
lowercase : int
for idx in range(1, ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
lowercase : int = 0
# the area corresponding to the grid that gives the product closest to target
lowercase : int = 0
# an estimate of b, using the quadratic formula
lowercase : float
# the largest integer less than b_estimate
lowercase : int
# the largest integer less than b_estimate
lowercase : int
# the triangle number corresponding to b_floor
lowercase : int
# the triangle number corresponding to b_ceil
lowercase : int
for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1 ):
lowercase : List[str] = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
lowercase : str = floor(_UpperCamelCase )
lowercase : int = ceil(_UpperCamelCase )
lowercase : str = triangle_numbers[b_floor]
lowercase : str = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
lowercase : Optional[int] = triangle_b_first_guess * triangle_a
lowercase : Tuple = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
lowercase : Dict = triangle_b_second_guess * triangle_a
lowercase : Any = idx_a * b_ceil
return area
if __name__ == "__main__":
print(F'''{solution() = }''')
| 173 | 1 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
lowercase__ :Optional[int] = logging.getLogger()
def UpperCamelCase ( ):
'''simple docstring'''
lowercase = argparse.ArgumentParser()
parser.add_argument('''-f''' )
lowercase = parser.parse_args()
return args.f
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = {}
lowercase = os.path.join(lowerCAmelCase__ , '''all_results.json''' )
if os.path.exists(lowerCAmelCase__ ):
with open(lowerCAmelCase__ , '''r''' ) as f:
lowercase = json.load(lowerCAmelCase__ )
else:
raise ValueError(f'can\'t find {path}' )
return results
def UpperCamelCase ( ):
'''simple docstring'''
lowercase = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
lowercase__ :int = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase ( SCREAMING_SNAKE_CASE__ ):
    """End-to-end tests that run the PyTorch ``*_no_trainer.py`` example scripts
    through ``accelerate launch`` and validate the metrics they write to
    ``all_results.json``.

    Bug fix: throughout this class, results of calls were dropped into throwaway
    locals while later statements read different, undefined names (``tmp_dir``,
    ``testargs``, ``result``, ``epochs``, ``cls.tmpdir``, ``cls.configPath``,
    ``cls._launch_args``). The assignments are restored so every method is
    self-consistent.
    """

    @classmethod
    def A__ ( cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir , '''default_config.yml''')
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]

    @classmethod
    def A__ ( cls):
        # Remove the shared accelerate-config directory created in setup.
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
    def A__ ( self):
        # run_glue_no_trainer: text classification on the MRPC fixtures.
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n    {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n    --model_name_or_path distilbert-base-uncased\n    --output_dir {tmp_dir}\n    --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n    --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --learning_rate=1e-4\n    --seed=42\n    --checkpointing_steps epoch\n    --with_tracking\n    '.split()
        if is_cuda_and_apex_available():
            testargs.append('''--fp16''')
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''glue_no_trainer''')))

    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
    def A__ ( self):
        # run_clm_no_trainer: causal language modeling on a tiny text file.
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n    {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n    --model_name_or_path distilgpt2\n    --train_file ./tests/fixtures/sample_text.txt\n    --validation_file ./tests/fixtures/sample_text.txt\n    --block_size 128\n    --per_device_train_batch_size 5\n    --per_device_eval_batch_size 5\n    --num_train_epochs 2\n    --output_dir {tmp_dir}\n    --checkpointing_steps epoch\n    --with_tracking\n    '.split()
        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result['''perplexity'''] , 1_0_0)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''clm_no_trainer''')))

    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
    def A__ ( self):
        # run_mlm_no_trainer: masked language modeling.
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n    {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n    --model_name_or_path distilroberta-base\n    --train_file ./tests/fixtures/sample_text.txt\n    --validation_file ./tests/fixtures/sample_text.txt\n    --output_dir {tmp_dir}\n    --num_train_epochs=1\n    --checkpointing_steps epoch\n    --with_tracking\n    '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result['''perplexity'''] , 4_2)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''mlm_no_trainer''')))

    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
    def A__ ( self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n    {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n    --model_name_or_path bert-base-uncased\n    --train_file tests/fixtures/tests_samples/conll/sample.json\n    --validation_file tests/fixtures/tests_samples/conll/sample.json\n    --output_dir {tmp_dir}\n    --learning_rate=2e-4\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=2\n    --num_train_epochs={epochs}\n    --seed 7\n    --checkpointing_steps epoch\n    --with_tracking\n    '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75)
        self.assertLess(result['''train_loss'''] , 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''ner_no_trainer''')))

    @unittest.skip(reason='''Fix me @muellerzr''')
    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
    def A__ ( self):
        # run_qa_no_trainer: SQuAD v2 question answering.
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n    {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n    --model_name_or_path bert-base-uncased\n    --version_2_with_negative\n    --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n    --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n    --output_dir {tmp_dir}\n    --seed=42\n    --max_train_steps=10\n    --num_warmup_steps=2\n    --learning_rate=2e-4\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --checkpointing_steps epoch\n    --with_tracking\n    '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result['''eval_f1'''] , 2_8)
        self.assertGreaterEqual(result['''eval_exact'''] , 2_8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''qa_no_trainer''')))

    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
    def A__ ( self):
        # run_swag_no_trainer: multiple choice.
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n    {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n    --model_name_or_path bert-base-uncased\n    --train_file tests/fixtures/tests_samples/swag/sample.json\n    --validation_file tests/fixtures/tests_samples/swag/sample.json\n    --output_dir {tmp_dir}\n    --max_train_steps=20\n    --num_warmup_steps=2\n    --learning_rate=2e-4\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --with_tracking\n    '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''swag_no_trainer''')))

    @slow
    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
    def A__ ( self):
        # run_summarization_no_trainer: seq2seq summarization with t5-small.
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n    {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n    --model_name_or_path t5-small\n    --train_file tests/fixtures/tests_samples/xsum/sample.json\n    --validation_file tests/fixtures/tests_samples/xsum/sample.json\n    --output_dir {tmp_dir}\n    --max_train_steps=50\n    --num_warmup_steps=8\n    --learning_rate=2e-4\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --checkpointing_steps epoch\n    --with_tracking\n    '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['''eval_rouge1'''] , 1_0)
        self.assertGreaterEqual(result['''eval_rouge2'''] , 2)
        self.assertGreaterEqual(result['''eval_rougeL'''] , 7)
        self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''summarization_no_trainer''')))

    @slow
    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
    def A__ ( self):
        # run_translation_no_trainer: en->ro translation with a student Marian model.
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n    {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n    --model_name_or_path sshleifer/student_marian_en_ro_6_1\n    --source_lang en\n    --target_lang ro\n    --train_file tests/fixtures/tests_samples/wmt16/sample.json\n    --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n    --output_dir {tmp_dir}\n    --max_train_steps=50\n    --num_warmup_steps=8\n    --num_beams=6\n    --learning_rate=3e-3\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --source_lang en_XX\n    --target_lang ro_RO\n    --checkpointing_steps epoch\n    --with_tracking\n    '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['''eval_bleu'''] , 3_0)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''epoch_0''')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''translation_no_trainer''')))

    @slow
    def A__ ( self):
        # run_semantic_segmentation_no_trainer on a tiny sample dataset.
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n    {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n    --dataset_name huggingface/semantic-segmentation-test-sample\n    --output_dir {tmp_dir}\n    --max_train_steps=10\n    --num_warmup_steps=2\n    --learning_rate=2e-4\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --checkpointing_steps epoch\n    '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10)

    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
    def A__ ( self):
        # run_image_classification_no_trainer with step-based checkpointing.
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n    {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n    --model_name_or_path google/vit-base-patch16-224-in21k\n    --dataset_name hf-internal-testing/cats_vs_dogs_sample\n    --learning_rate 1e-4\n    --per_device_train_batch_size 2\n    --per_device_eval_batch_size 1\n    --max_train_steps 2\n    --train_val_split 0.1\n    --seed 42\n    --output_dir {tmp_dir}\n    --with_tracking\n    --checkpointing_steps 1\n    '.split()
        if is_cuda_and_apex_available():
            testargs.append('''--fp16''')
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''step_1''')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , '''image_classification_no_trainer''')))
| 101 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def a ( __a ) -> None:
    """Freeze *__a*: disable gradient tracking on every parameter of the module.

    Args:
        __a: A ``torch.nn.Module`` whose parameters should be frozen.
    """
    # Bug fix: the loop previously iterated an undefined name and assigned the
    # flag to a throwaway local instead of each parameter's `requires_grad`.
    for param in __a.parameters():
        param.requires_grad = False
def a ( ) -> str:
    """Pick the torch device: '''cuda''' when available, else '''mps''' when
    built/available, else '''cpu'''.

    Prints a warning when MPS is selected, since backpropagation has been
    observed to misbehave there.
    """
    # Bug fix: the chosen device was previously dropped into throwaway locals
    # and the function then read/returned an undefined `device` name.
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = '''mps'''
    if device == "mps":
        print(
            '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
            ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
            ''' with generations.''' )
    return device
def a ( __a ) -> None:
    """Display image *__a* with matplotlib, hiding both axes.

    Args:
        __a: Image data accepted by ``plt.imshow`` (e.g. an HxWxC array).
    """
    # Bug fix: the imshow handle was dropped into a throwaway local and axis
    # visibility was toggled with the image argument instead of False.
    fig = plt.imshow(__a )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def a ( ) -> str:
    """Return the current local wall-clock time formatted as ``HH:MM:SS``."""
    # Bug fix: the datetime was previously dropped into a throwaway local and
    # `strftime` was then called on an undefined `current_time` name.
    current_time = datetime.now()
    return current_time.strftime('''%H:%M:%S''' )
"""simple docstring"""
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
# Custom (non-pure-Python) source files that must ship with the built package.
# NOTE(review): the check function below iterates `FILES_TO_FIND`, but this
# list is bound to `__lowercase` — confirm the intended global name.
__lowercase = [
    'kernels/rwkv/wkv_cuda.cu',
    'kernels/rwkv/wkv_op.cpp',
    'kernels/deformable_detr/ms_deform_attn.h',
    'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
    'models/graphormer/algos_graphormer.pyx',
]
def lowerCAmelCase (__UpperCamelCase : int , files=None ):
    """Return True when every expected custom file exists below a base path.

    Args:
        __UpperCamelCase: Base directory (a ``pathlib.Path``) of the package
            to inspect.
        files: Optional iterable of relative file paths to check; defaults to
            the module-level ``FILES_TO_FIND`` list.

    Returns:
        bool: True if all files are present, False otherwise.
    """
    # Bug fix: the original tested `transformers_path`, a name that is not in
    # scope inside this function, instead of its own argument.
    if files is None:
        files = FILES_TO_FIND
    for file in files:
        if not (__UpperCamelCase / file).exists():
            return False
    return True
if __name__ == "__main__":
    # NOTE(review): the parser/args/module objects below are bound to
    # `__lowercase` but read back under different names (`parser`, `args`,
    # `transformers_module`, `transformers_path`) — confirm intended names.
    __lowercase = argparse.ArgumentParser()
    parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
    __lowercase = parser.parse_args()
    if args.check_lib:
        # Inspect the installed `transformers` package rather than the build dir.
        __lowercase = importlib.import_module('''transformers''')
        __lowercase = Path(transformers_module.__file__).parent
    else:
        __lowercase = Path.cwd() / 'build/lib/transformers'
    if not test_custom_files_are_present(transformers_path):
        raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
| 359 | """simple docstring"""
def lowerCAmelCase (__UpperCamelCase : int = 1_0_0_0 ):
    """Project Euler style search: largest product ``a*b*c`` over Pythagorean
    triples with ``a + b + c == n``.

    Returns -1 when no such triple exists for the given perimeter.
    """
    n = __UpperCamelCase
    best = -1
    for a in range(1 , n // 3 ):
        # From a**2 + b**2 = c**2 and a + b + c = n, b is fully determined by a.
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == a * a + b * b:
            best = max(best , a * b * c)
    return best
if __name__ == "__main__":
    # Bug fix: the f-string previously called `solution()`, a name not defined
    # in this module; the function above is named `lowerCAmelCase`.
    print(f'''{lowerCAmelCase() = }''')
| 85 | 0 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
# NOTE(review): this binds "3" to a throwaway name; it was presumably meant to
# set an environment variable (e.g. a log-level) — confirm the intent.
__lowerCAmelCase = """3"""
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
# Torch details are optional: fall back to None when torch is not installed.
try:
    import torch
    print('''Torch version:''', torch.__version__)
    print('''Cuda available:''', torch.cuda.is_available())
    print('''Cuda version:''', torch.version.cuda)
    print('''CuDNN version:''', torch.backends.cudnn.version())
    print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
    print('''Torch version:''', None)
# transformers is likewise optional.
try:
    import transformers
    print('''transformers version:''', transformers.__version__)
except ImportError:
    print('''transformers version:''', None)
| 196 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A__ ( __UpperCAmelCase , unittest.TestCase ):
    """Fast tests for ``ShapEImgaImgPipeline`` built from tiny dummy sub-models.

    Bug fix: the obfuscation collapsed every class attribute to ``__A`` and
    every method to ``__lowercase`` while the bodies still referenced the real
    names (``self.pipeline_class``, ``self.time_input_dim``,
    ``self.dummy_prior``, local ``config``/``model``/``pipe``, ...); names are
    restored so each reference resolves.
    """

    pipeline_class = ShapEImgaImgPipeline
    params = ['''image''']
    batch_params = ['''image''']
    required_optional_params = [
        '''num_images_per_prompt''',
        '''num_inference_steps''',
        '''generator''',
        '''latents''',
        '''guidance_scale''',
        '''frame_size''',
        '''output_type''',
        '''return_dict''',
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        # Hidden width shared by the dummy CLIP encoder and the prior.
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        # Tiny CLIP vision tower used as the image encoder.
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
        return image_processor

    @property
    def dummy_prior(self):
        # Tiny PriorTransformer mapping image embeddings to latents.
        torch.manual_seed(0)
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 16,
            'embedding_dim': self.time_input_dim,
            'num_embeddings': 32,
            'embedding_proj_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'num_layers': 1,
            'clip_embed_dim': self.time_input_dim * 2,
            'additional_embeddings': 0,
            'time_embed_act_fn': 'gelu',
            'norm_in_type': 'layer',
            'embedding_proj_norm_type': 'layer',
            'encoder_hid_proj_type': None,
            'added_emb_type': None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        # Tiny ShapERenderer decoding latents to frames.
        torch.manual_seed(0)
        model_kwargs = {
            'param_shapes': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            'd_latent': self.time_input_dim,
            'd_hidden': self.renderer_dim,
            'n_output': 12,
            'background': (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble all dummy sub-models into a pipeline components dict."""
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
            'prior': prior,
            'image_encoder': image_encoder,
            'image_processor': image_processor,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components

    def get_dummy_inputs(self , device , seed=0):
        """Build deterministic pipeline call kwargs for the given device."""
        input_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed)).to(device)
        if str(device).startswith('mps'):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'image': input_image,
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs

    def test_shap_e_img2img(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
                0.00_03_92_16,
            ])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    """Slow integration test running the real ``openai/shap-e-img2img``
    checkpoint on GPU and comparing against a reference rendering.

    Bug fix: the obfuscated variable bindings were restored so the pipeline,
    generator, and images are actually used after being created.
    """

    def tearDown(self):
        # Release GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_img2img_out.npy')
        pipe = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img')
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image)
| 99 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
# Map from checkpoint identifier to the URL of its config file on the Hub.
lowerCAmelCase : Optional[int] = {
    """sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __magic_name__ ( __a ):
    """Configuration class for ViT-MSN models (``model_type`` = ``vit_msn``).

    Stores the architecture hyper-parameters; the defaults mirror the base
    ViT-MSN model. Unknown keyword arguments are forwarded to the parent
    config class.
    """

    # Bug fix: the model-type attribute was obfuscated, every __init__
    # parameter shared the name `_a` (a SyntaxError), and `super().__init__`
    # was called with an undefined name; real names are restored to match the
    # attributes they set.
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-0_6,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        """Create the config, storing every architecture hyper-parameter."""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 369 |
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
lowerCAmelCase : Tuple = _symbol_database.Default()
lowerCAmelCase : int = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ 
\x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 
\x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
# Build the generated protobuf message classes into this module's namespace.
# NOTE(review): the builder calls read `DESCRIPTOR` and `_globals`, but the
# corresponding assignments bind `lowerCAmelCase` instead — confirm the
# intended global names.
lowerCAmelCase : Tuple = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    # Pure-Python descriptors: set serialized options plus the byte offsets of
    # each message within the serialized file (emitted by protoc).
    lowerCAmelCase : Dict = None
    lowerCAmelCase : Union[str, Any] = b"""H\003"""
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    lowerCAmelCase : List[Any] = 45
    lowerCAmelCase : List[str] = 1581
    lowerCAmelCase : List[str] = 1517
    lowerCAmelCase : List[Any] = 1570
    lowerCAmelCase : List[str] = 1584
    lowerCAmelCase : Tuple = 1793
    lowerCAmelCase : Union[str, Any] = 1795
    lowerCAmelCase : Tuple = 1916
    lowerCAmelCase : Tuple = 1864
    lowerCAmelCase : Any = 1905
    lowerCAmelCase : int = 1919
    lowerCAmelCase : Union[str, Any] = 2429
    lowerCAmelCase : List[Any] = 2208
    lowerCAmelCase : Tuple = 2418
    lowerCAmelCase : str = 2323
    lowerCAmelCase : List[str] = 2407
# @@protoc_insertion_point(module_scope)
| 168 | 0 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
# Mark every test in this module as an integration test.
# NOTE(review): bound to `UpperCamelCase__`, but pytest only honors the module
# attribute named `pytestmark` — confirm the intended name.
UpperCamelCase__ = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def a__ ( path , tmp_path ):
    """Check inspect_dataset copies the loading script into tmp_path."""
    # Bug fix: both parameters shared one obfuscated name (a SyntaxError); the
    # first must be `path` to bind the parametrized value, the second is
    # pytest's `tmp_path` fixture. The dead `Dict` annotation is dropped.
    inspect_dataset(path , tmp_path )
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def a__ ( path , tmp_path ):
    """Check inspect_metric copies the metric script into tmp_path."""
    # Bug fix: duplicate obfuscated parameter names (a SyntaxError) restored to
    # `path` (parametrized) and `tmp_path` (pytest fixture).
    inspect_metric(path , tmp_path )
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
    '''path, config_name, expected_splits''' , [
        ('''squad''', '''plain_text''', ['''train''', '''validation''']),
        ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
        ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
    ] , )
def a__ ( path , config_name , expected_splits ):
    """Check get_dataset_config_info returns the requested config and splits."""
    # Bug fix: all three parameters shared one obfuscated name (a SyntaxError);
    # names restored to match the parametrize argnames.
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    '''path, config_name, expected_exception''' , [
        ('''paws''', None, ValueError),
    ] , )
def a__ ( path , config_name , expected_exception ):
    """Check get_dataset_config_info raises when the config name is missing."""
    # Bug fix: duplicate obfuscated parameter names restored to match the
    # parametrize argnames.
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
    '''path, expected''' , [
        ('''squad''', '''plain_text'''),
        ('''acronym_identification''', '''default'''),
        ('''lhoestq/squad''', '''plain_text'''),
        ('''lhoestq/test''', '''default'''),
        ('''lhoestq/demo1''', '''lhoestq--demo1'''),
        ('''dalle-mini/wit''', '''dalle-mini--wit'''),
    ] , )
def a__ ( path , expected ):
    """Check get_dataset_config_names includes the expected config."""
    # Bug fix: duplicate obfuscated parameter names restored to match the
    # parametrize argnames.
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
    '''path, expected_configs, expected_splits_in_first_config''' , [
        ('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
        ('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
        ('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
    ] , )
def a__ ( path , expected_configs , expected_splits_in_first_config ):
    """Check get_dataset_infos lists every config with the expected splits."""
    # Bug fix: all three parameters shared one obfuscated name (a SyntaxError);
    # names restored to match the parametrize argnames.
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
    '''path, expected_config, expected_splits''' , [
        ('''squad''', '''plain_text''', ['''train''', '''validation''']),
        ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
        ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
    ] , )
def a__ ( path , expected_config , expected_splits ):
    """Check get_dataset_infos contains the expected config and its splits."""
    # Bug fix: all three parameters shared one obfuscated name (a SyntaxError);
    # names restored to match the parametrize argnames.
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    '''path, config_name, expected_exception''' , [
        ('''paws''', None, ValueError),
    ] , )
def a__ ( path , config_name , expected_exception ):
    """Check get_dataset_split_names raises when the config name is missing."""
    # Bug fix: all three parameters shared one obfuscated name (a SyntaxError);
    # names restored to match the parametrize argnames.
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
| 181 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase_ ( __a , unittest.TestCase ):
    # LED tokenizer test-suite (slow + fast variants).
    #
    # NOTE(review): this chunk is machine-obfuscated past the point of
    # running; it is documented here, not repaired:
    #   * the mixin base `__a` is undefined — presumably
    #     TokenizerTesterMixin (imported above), confirm upstream;
    #   * the three class attributes below rebind one name, so only the
    #     last assignment survives (upstream: tokenizer_class,
    #     rust_tokenizer_class, test_rust_tokenizer);
    #   * every method is named `lowercase_`, so later defs shadow earlier
    #     ones, and names such as `_A`, `kwargs`, `batch`, `targets`,
    #     `inputs`, `labels`, `encoded_output`, `outputs` and
    #     `self.default_tokenizer*` are unresolved inside the bodies
    #     because every local rebinds `UpperCAmelCase__`.
    lowerCAmelCase__ = LEDTokenizer
    lowerCAmelCase__ = LEDTokenizerFast
    lowerCAmelCase__ = True
    def lowercase_ ( self : int ):
        """setUp: write a tiny BPE vocab/merges fixture into the temp dir."""
        super().setUp()
        UpperCAmelCase__ : List[Any] = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        UpperCAmelCase__ : Optional[Any] = dict(zip(_A , range(len(_A ) ) ) )
        UpperCAmelCase__ : Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        UpperCAmelCase__ : Any = {'''unk_token''': '''<unk>'''}
        UpperCAmelCase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        UpperCAmelCase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(_A ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(_A ) )
    def lowercase_ ( self : Optional[int] , **_A : Any ):
        """Load the slow tokenizer from the fixture directory."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A )
    def lowercase_ ( self : Union[str, Any] , **_A : Optional[Any] ):
        """Load the fast (Rust) tokenizer from the fixture directory."""
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_A )
    def lowercase_ ( self : Tuple , _A : List[str] ):
        """Return an (input text, expected output text) pair."""
        return "lower newer", "lower newer"
    @cached_property
    def lowercase_ ( self : List[Any] ):
        """Pretrained slow tokenizer used by the integration checks."""
        return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
    @cached_property
    def lowercase_ ( self : Any ):
        """Pretrained fast tokenizer used by the integration checks."""
        return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
    @require_torch
    def lowercase_ ( self : Optional[Any] ):
        """Batch-encode two sentences; check (2, 9) shapes and first-row ids."""
        UpperCAmelCase__ : Union[str, Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        UpperCAmelCase__ : Dict = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCAmelCase__ : Union[str, Any] = tokenizer(_A , max_length=len(_A ) , padding=_A , return_tensors='''pt''' )
            self.assertIsInstance(_A , _A )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            UpperCAmelCase__ : int = batch.input_ids.tolist()[0]
            self.assertListEqual(_A , _A )
    @require_torch
    def lowercase_ ( self : Tuple ):
        """Padding without targets must not emit label/decoder keys."""
        UpperCAmelCase__ : Optional[int] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCAmelCase__ : List[str] = tokenizer(_A , padding=_A , return_tensors='''pt''' )
            self.assertIn('''input_ids''' , _A )
            self.assertIn('''attention_mask''' , _A )
            self.assertNotIn('''labels''' , _A )
            self.assertNotIn('''decoder_attention_mask''' , _A )
    @require_torch
    def lowercase_ ( self : str ):
        """Target texts padded to max_length=32 keep that width."""
        UpperCAmelCase__ : Tuple = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCAmelCase__ : Optional[Any] = tokenizer(text_target=_A , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
            self.assertEqual(32 , targets['''input_ids'''].shape[1] )
    @require_torch
    def lowercase_ ( self : Tuple ):
        """Over-long input is truncated to width 5122."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCAmelCase__ : Any = tokenizer(
                ['''I am a small frog''' * 1_024, '''I am a small frog'''] , padding=_A , truncation=_A , return_tensors='''pt''' )
            self.assertIsInstance(_A , _A )
            self.assertEqual(batch.input_ids.shape , (2, 5_122) )
    @require_torch
    def lowercase_ ( self : List[str] ):
        """Source and target encodings are wrapped in BOS ... EOS."""
        UpperCAmelCase__ : Any = ['''A long paragraph for summarization.''']
        UpperCAmelCase__ : List[Any] = [
            '''Summary of the text.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCAmelCase__ : Optional[Any] = tokenizer(_A , return_tensors='''pt''' )
            UpperCAmelCase__ : int = tokenizer(text_target=_A , return_tensors='''pt''' )
            UpperCAmelCase__ : str = inputs['''input_ids''']
            UpperCAmelCase__ : Tuple = targets['''input_ids''']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
    @require_torch
    def lowercase_ ( self : List[str] ):
        """`pad` must extend a user-provided global_attention_mask too."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCAmelCase__ : Tuple = ['''Summary of the text.''', '''Another summary.''']
            UpperCAmelCase__ : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            UpperCAmelCase__ : List[str] = tokenizer(_A , padding=_A )
            UpperCAmelCase__ : str = [[0] * len(_A ) for x in encoded_output['''input_ids''']]
            UpperCAmelCase__ : Any = tokenizer.pad(_A )
            self.assertSequenceEqual(outputs['''global_attention_mask'''] , _A )
    def lowercase_ ( self : Tuple ):
        """Intentionally skipped in the upstream suite."""
        pass
    def lowercase_ ( self : Dict ):
        """Slow and fast tokenizers must agree on special-token handling."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCAmelCase__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(_A , **_A )
                UpperCAmelCase__ : int = self.tokenizer_class.from_pretrained(_A , **_A )
                UpperCAmelCase__ : Any = '''A, <mask> AllenNLP sentence.'''
                UpperCAmelCase__ : Dict = tokenizer_r.encode_plus(_A , add_special_tokens=_A , return_token_type_ids=_A )
                UpperCAmelCase__ : Optional[int] = tokenizer_p.encode_plus(_A , add_special_tokens=_A , return_token_type_ids=_A )
                self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
                self.assertEqual(
                    sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
                UpperCAmelCase__ : str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                UpperCAmelCase__ : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    _A , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    _A , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 181 | 1 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowerCamelCase (unittest.TestCase ):
    """Pipeline tests for video classification.

    NOTE(review): obfuscation damage, documented rather than repaired:
    the first two methods declare multiple parameters all named
    `__magic_name__` (a SyntaxError); their bodies then read names
    (`example_video_filepath`, `video_classifier`, `examples`,
    `video_file_path`) that were never bound because every local rebinds
    `SCREAMING_SNAKE_CASE_`.
    """
    lowerCamelCase__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def __A ( self : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : str ) -> List[Any]:
        """Download a sample clip and build the pipeline + example inputs."""
        SCREAMING_SNAKE_CASE_ = hf_hub_download(
            repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
        SCREAMING_SNAKE_CASE_ = VideoClassificationPipeline(model=__magic_name__ , image_processor=__magic_name__ , top_k=2 )
        SCREAMING_SNAKE_CASE_ = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples
    def __A ( self : Tuple , __magic_name__ : List[str] , __magic_name__ : Dict ) -> Tuple:
        """Each example must yield exactly top_k = 2 {score, label} dicts."""
        for example in examples:
            SCREAMING_SNAKE_CASE_ = video_classifier(__magic_name__ )
            self.assertEqual(
                __magic_name__ , [
                    {"score": ANY(__magic_name__ ), "label": ANY(__magic_name__ )},
                    {"score": ANY(__magic_name__ ), "label": ANY(__magic_name__ )},
                ] , )
    @require_torch
    def __A ( self : Optional[Any] ) -> str:
        """End-to-end check with a tiny random VideoMAE checkpoint."""
        SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        SCREAMING_SNAKE_CASE_ = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10} , crop_size={"height": 10, "width": 10} )
        SCREAMING_SNAKE_CASE_ = pipeline(
            "video-classification" , model=__magic_name__ , feature_extractor=__magic_name__ , frame_sampling_rate=4 )
        SCREAMING_SNAKE_CASE_ = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
        SCREAMING_SNAKE_CASE_ = video_classifier(__magic_name__ , top_k=2 )
        self.assertEqual(
            nested_simplify(__magic_name__ , decimals=4 ) , [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}] , )
        SCREAMING_SNAKE_CASE_ = video_classifier(
            [
                video_file_path,
                video_file_path,
            ] , top_k=2 , )
        self.assertEqual(
            nested_simplify(__magic_name__ , decimals=4 ) , [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ] , )
    @require_tf
    def __A ( self : Optional[int] ) -> Tuple:
        """TF counterpart intentionally unimplemented upstream."""
        pass
| 305 | import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def a__ ( __UpperCamelCase ):
    """Return *__UpperCamelCase* plus two.

    This is the `add_two` tool handed to the sandboxed interpreter in the
    tests below.  The obfuscated original returned the undefined name `x`
    instead of its own parameter, raising NameError on every call.
    """
    return __UpperCamelCase + 2
class lowerCamelCase (unittest.TestCase ):
    """Unit tests for the restricted-Python `evaluate` interpreter.

    NOTE(review): obfuscation damage, documented rather than repaired:
    every local rebinds `SCREAMING_SNAKE_CASE_` (upstream: code, state,
    result); every `evaluate(...)` call passes the undefined name
    `__magic_name__` where code/state belong; the tool dict references the
    undefined `add_two` (the helper above is named `a__`); and all methods
    share the name `__A`, so later defs shadow earlier ones.
    """
    def __A ( self : List[Any] ) -> int:
        """Plain assignments: result is the last assigned value; state updates."""
        SCREAMING_SNAKE_CASE_ = "x = 3"
        SCREAMING_SNAKE_CASE_ = {}
        SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
        assert result == 3
        self.assertDictEqual(__magic_name__ , {"x": 3} )
        SCREAMING_SNAKE_CASE_ = "x = y"
        SCREAMING_SNAKE_CASE_ = {"y": 5}
        SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(__magic_name__ , {"x": 5, "y": 5} )
    def __A ( self : Union[str, Any] ) -> str:
        """Tool calls work when the tool is provided, error out otherwise."""
        SCREAMING_SNAKE_CASE_ = "y = add_two(x)"
        SCREAMING_SNAKE_CASE_ = {"x": 3}
        SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
        assert result == 5
        self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} )
        # Won't work without the tool
        with CaptureStdout() as out:
            SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
        assert result is None
        assert "tried to execute add_two" in out.out
    def __A ( self : List[str] ) -> int:
        """Single statement: result and state reflect the assignment."""
        SCREAMING_SNAKE_CASE_ = "x = 3"
        SCREAMING_SNAKE_CASE_ = {}
        SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
        assert result == 3
        self.assertDictEqual(__magic_name__ , {"x": 3} )
    def __A ( self : Optional[Any] ) -> str:
        """Dict literals built from state values and tool results."""
        SCREAMING_SNAKE_CASE_ = "test_dict = {'x': x, 'y': add_two(x)}"
        SCREAMING_SNAKE_CASE_ = {"x": 3}
        SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
        self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} )
        self.assertDictEqual(__magic_name__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
    def __A ( self : Optional[int] ) -> List[str]:
        """Multiple statements: result is the last assignment's value."""
        SCREAMING_SNAKE_CASE_ = "x = 3\ny = 5"
        SCREAMING_SNAKE_CASE_ = {}
        SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} )
    def __A ( self : Any ) -> List[str]:
        """f-strings are interpolated against the current state."""
        SCREAMING_SNAKE_CASE_ = "text = f'This is x: {x}.'"
        SCREAMING_SNAKE_CASE_ = {"x": 3}
        SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(__magic_name__ , {"x": 3, "text": "This is x: 3."} )
    def __A ( self : int ) -> Tuple:
        """if/else branches are selected based on the state value."""
        SCREAMING_SNAKE_CASE_ = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        SCREAMING_SNAKE_CASE_ = {"x": 3}
        SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(__magic_name__ , {"x": 3, "y": 2} )
        SCREAMING_SNAKE_CASE_ = {"x": 8}
        SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(__magic_name__ , {"x": 8, "y": 5} )
    def __A ( self : str ) -> str:
        """List literals built from state values and tool results."""
        SCREAMING_SNAKE_CASE_ = "test_list = [x, add_two(x)]"
        SCREAMING_SNAKE_CASE_ = {"x": 3}
        SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
        self.assertListEqual(__magic_name__ , [3, 5] )
        self.assertDictEqual(__magic_name__ , {"x": 3, "test_list": [3, 5]} )
    def __A ( self : Union[str, Any] ) -> List[Any]:
        """Variable aliasing copies the value into the new name."""
        SCREAMING_SNAKE_CASE_ = "y = x"
        SCREAMING_SNAKE_CASE_ = {"x": 3}
        SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
        assert result == 3
        self.assertDictEqual(__magic_name__ , {"x": 3, "y": 3} )
    def __A ( self : Tuple ) -> List[Any]:
        """Subscripting lists and dicts produced earlier in the snippet."""
        SCREAMING_SNAKE_CASE_ = "test_list = [x, add_two(x)]\ntest_list[1]"
        SCREAMING_SNAKE_CASE_ = {"x": 3}
        SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
        assert result == 5
        self.assertDictEqual(__magic_name__ , {"x": 3, "test_list": [3, 5]} )
        SCREAMING_SNAKE_CASE_ = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        SCREAMING_SNAKE_CASE_ = {"x": 3}
        SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
        assert result == 5
        self.assertDictEqual(__magic_name__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
    def __A ( self : Tuple ) -> Any:
        """for loops: result is the value of the final loop assignment."""
        SCREAMING_SNAKE_CASE_ = "x = 0\nfor i in range(3):\n    x = i"
        SCREAMING_SNAKE_CASE_ = {}
        SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"range": range} , state=__magic_name__ )
        assert result == 2
        self.assertDictEqual(__magic_name__ , {"x": 2, "i": 2} )
| 305 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class UpperCAmelCase_ ( PretrainedConfig):
    """Configuration class for TrOCR decoder models.

    Fixes to the obfuscated original: the base class was the undefined
    name `a` (PretrainedConfig is imported above), every `__init__`
    parameter was declared as `__a` (a SyntaxError), and the body bound
    throwaway locals named `_lowerCAmelCase` instead of attributes on
    `self`.  Parameter names and defaults are reconstructed from the
    values the body reads, matching upstream `TrOCRConfig`.

    NOTE(review): the three class attributes below all rebind
    `lowerCamelCase__` (upstream: model_type, keys_to_ignore_at_inference,
    attribute_map); kept as-is to avoid changing the class interface.
    """
    lowerCamelCase__ = 'trocr'
    lowerCamelCase__ = ['past_key_values']
    lowerCamelCase__ = {
        'num_attention_heads': 'decoder_attention_heads',
        'hidden_size': 'd_model',
        'num_hidden_layers': 'decoder_layers',
    }

    def __init__( self, vocab_size=5_0265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        """Store decoder hyper-parameters and forward special-token ids."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        # Special-token ids are handled by the PretrainedConfig base class.
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
| 36 | from ..utils import DummyObject, requires_backends
class _UpperCamelCase ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
__a : Dict = ['''keras_nlp''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''keras_nlp'''] ) | 210 | 0 |
import warnings
from functools import wraps
from typing import Callable
def UpperCAmelCase_ ( __UpperCAmelCase : Callable ) -> Callable:
    """Decorator flagging *__UpperCAmelCase* as experimental API.

    Each call to the returned wrapper emits a UserWarning naming the
    decorated callable, then delegates to it unchanged.

    Fixes to the obfuscated original: the wrapper's *args/**kwargs were
    both named `__UpperCAmelCase` (shadowing the decorated function), the
    warning message referenced an undefined `fn`, and the decorated
    function itself was passed to `warnings.warn` as the warning category.
    """

    @wraps(__UpperCAmelCase )
    def _inner_fn(*args , **kwargs ):
        warnings.warn(
            (f"'{__UpperCAmelCase.__name__}' is experimental and might be subject to breaking changes in the future.") , UserWarning , )
        return __UpperCAmelCase(*args , **kwargs )

    return _inner_fn
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
    """Container for the hyper-parameters used by the Levit processor tests.

    Fixes to the obfuscated original: every `__init__` parameter was
    declared with the single name `_lowerCAmelCase` (a SyntaxError) and
    every body statement rebound one `SCREAMING_SNAKE_CASE_` local instead
    of setting attributes on `self`.  Parameter names and defaults are
    reconstructed from the attribute names the accessor below reads.
    """

    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=None , image_std=None , ):
        # size/crop_size/mean/std fall back to Levit's standard defaults.
        size = size if size is not None else {'shortest_edge': 18}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def lowerCAmelCase_ ( self ):
        """Return the kwargs dict used to build a LevitImageProcessor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tests for LevitImageProcessor (properties + PIL/numpy/torch inputs).

    NOTE(review): obfuscation damage, documented rather than repaired:
    the mixin base `_SCREAMING_SNAKE_CASE` is undefined (presumably
    ImageProcessingSavingTestMixin, imported above — confirm); the first
    method instantiates the undefined name `LevitImageProcessingTester`
    and binds a local instead of `self.image_processor_tester`; bodies
    read names (`image_processor`, `image_inputs`, `encoded_images`) that
    were never bound because every local rebinds `SCREAMING_SNAKE_CASE_`;
    several assertions pass the undefined `_lowerCAmelCase`; and most
    methods share the name `lowerCAmelCase_`, shadowing one another.
    """
    lowercase_ = LevitImageProcessor if is_vision_available() else None
    def lowerCAmelCase_ ( self : List[str] ):
        """setUp: build the hyper-parameter container for the suite."""
        SCREAMING_SNAKE_CASE_ = LevitImageProcessingTester(self )
    @property
    def lowerCAmelCase_ ( self : Dict ):
        """kwargs dict used to instantiate the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def lowerCAmelCase_ ( self : str ):
        """The processor must expose all expected config attributes."""
        SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_lowerCAmelCase , 'image_mean' ) )
        self.assertTrue(hasattr(_lowerCAmelCase , 'image_std' ) )
        self.assertTrue(hasattr(_lowerCAmelCase , 'do_normalize' ) )
        self.assertTrue(hasattr(_lowerCAmelCase , 'do_resize' ) )
        self.assertTrue(hasattr(_lowerCAmelCase , 'do_center_crop' ) )
        self.assertTrue(hasattr(_lowerCAmelCase , 'size' ) )
    def lowerCAmelCase_ ( self : Union[str, Any] ):
        """from_dict honors defaults and explicit size/crop_size overrides."""
        SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 18} )
        self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
        SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'shortest_edge': 42} )
        self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
    def lowerCAmelCase_ ( self : Dict ):
        """Intentionally skipped in the upstream suite."""
        pass
    def lowerCAmelCase_ ( self : int ):
        """PIL inputs: single image and batch produce crop-sized tensors."""
        # Initialize image_processing
        SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase , Image.Image )
        # Test not batched input
        SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE_ = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
    def lowerCAmelCase_ ( self : str ):
        """numpy inputs: single image and batch produce crop-sized tensors."""
        # Initialize image_processing
        SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase , np.ndarray )
        # Test not batched input
        SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE_ = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
    def lowerCAmelCase_ ( self : Tuple ):
        """torch inputs: single image and batch produce crop-sized tensors."""
        # Initialize image_processing
        SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
        # Test not batched input
        SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE_ = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
import qiskit
def lowerCamelCase__ ( qubits: int , classical_bits: int ) -> "qiskit.result.counts.Counts":
    """Run a single-qubit measurement on the Aer simulator.

    Builds a circuit with *qubits* quantum and *classical_bits* classical
    bits, measures qubit 0 into classical bit 0, executes 1000 shots and
    returns the counts histogram.

    Fixes to the obfuscated original: both parameters shared the name
    `a__` (a SyntaxError), the simulator/circuit/job locals all rebound a
    single variable, and `qiskit.execute` / `get_counts` were called with
    the integer arguments instead of the circuit and backend.
    """
    simulator = qiskit.Aer.get_backend("""aer_simulator""" )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )


if __name__ == "__main__":
    # The obfuscated guard called the undefined name `single_qubit_measure`.
    print(F'''Total count for various states are: {lowerCamelCase__(1, 1)}''')
| 122 |
from __future__ import annotations
def lowerCamelCase__ ( nth_term: int | float | str , power: int | float | str ) -> list[str]:
    """Return the first *nth_term* terms of the P-series as strings.

    The P-series is 1 + 1/2^p + 1/3^p + ...; the first term is the literal
    "1" and each later term k is rendered as "1 / <k**power>".  An
    empty-string *nth_term* yields [""] (mirrors a blank interactive
    input).

    Fixes to the obfuscated original: both parameters shared the name
    `a__` (a SyntaxError) and both int() conversions rebound one local,
    and the __main__ guard bound both inputs to `_A` and called the
    undefined name `p_series`.
    """
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series: list[str] = []
    for temp in range(int(nth_term ) ):
        series.append(f'''1 / {pow(temp + 1 , int(power ) )}''' if series else '''1''' )
    return series


if __name__ == "__main__":
    nth_term_arg = int(input('''Enter the last number (nth term) of the P-Series'''))
    power_arg = int(input('''Enter the power for P-Series'''))
    print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
    print(lowerCamelCase__(nth_term_arg, power_arg))
| 122 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __magic_name__ ( ConfigTester ):
    """ConfigTester specialization checking MobileNetV2-specific fields.

    Fixes to the obfuscated original: the base class was the undefined
    name `lowerCAmelCase` (ConfigTester is imported above), and the
    hasattr checks referenced the undefined name `snake_case` instead of
    the config instance built on the first line.
    """

    def lowerCAmelCase ( self):
        """Instantiate the config and assert MobileNetV2 attributes exist."""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config , 'tf_padding'))
        self.parent.assertTrue(hasattr(config , 'depth_multiplier'))
class __magic_name__ :
    """Hyper-parameter container / checker for the MobileNetV2 model tests.

    NOTE(review): obfuscation damage, documented rather than repaired:
    `__init__` declares every parameter with the single name `snake_case`
    (a SyntaxError — upstream names are recoverable from the attribute
    assignments below); the create_and_check methods likewise declare
    four `snake_case` parameters each and pass the ambiguous `snake_case`
    where upstream used config / pixel_values / labels / torch_device;
    `return` statements reference upstream names (`config`,
    `pixel_values`, ...) that the rebinding of `_UpperCAmelCase` never
    bound; and all methods share the name `lowerCAmelCase`, so later defs
    shadow earlier ones.
    """
    def __init__( self , snake_case , snake_case=1_3 , snake_case=3 , snake_case=3_2 , snake_case=0.25 , snake_case=8 , snake_case=8 , snake_case=6 , snake_case=3_2 , snake_case=True , snake_case=True , snake_case=True , snake_case="relu6" , snake_case=1_2_8_0 , snake_case=0.1 , snake_case=0.02 , snake_case=True , snake_case=True , snake_case=1_0 , snake_case=None , ) -> Optional[int]:
        """Record the model/test hyper-parameters for the suite."""
        _UpperCAmelCase : List[Any] =parent
        _UpperCAmelCase : int =batch_size
        _UpperCAmelCase : int =num_channels
        _UpperCAmelCase : Union[str, Any] =image_size
        _UpperCAmelCase : Tuple =depth_multiplier
        _UpperCAmelCase : Optional[int] =depth_divisible_by
        _UpperCAmelCase : Tuple =min_depth
        _UpperCAmelCase : Tuple =expand_ratio
        _UpperCAmelCase : Any =tf_padding
        _UpperCAmelCase : Union[str, Any] =output_stride
        _UpperCAmelCase : Optional[Any] =first_layer_is_expansion
        _UpperCAmelCase : Optional[Any] =finegrained_output
        _UpperCAmelCase : List[Any] =hidden_act
        _UpperCAmelCase : str =last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        _UpperCAmelCase : str =classifier_dropout_prob
        _UpperCAmelCase : Optional[Any] =use_labels
        _UpperCAmelCase : Optional[Any] =is_training
        _UpperCAmelCase : List[str] =num_labels
        _UpperCAmelCase : Optional[Any] =initializer_range
        _UpperCAmelCase : str =scope
    def lowerCAmelCase ( self) -> List[Any]:
        """Build random pixel values, optional labels and a config."""
        _UpperCAmelCase : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        _UpperCAmelCase : List[Any] =None
        _UpperCAmelCase : Dict =None
        if self.use_labels:
            _UpperCAmelCase : Tuple =ids_tensor([self.batch_size] , self.num_labels)
            _UpperCAmelCase : Optional[int] =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
        _UpperCAmelCase : Optional[Any] =self.get_config()
        return config, pixel_values, labels, pixel_labels
    def lowerCAmelCase ( self) -> Dict:
        """Build a MobileNetV2 config from the stored hyper-parameters."""
        return MobileNetVaConfig(
            num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case) -> List[Any]:
        """Base model: check hidden-state and pooler output shapes."""
        _UpperCAmelCase : str =MobileNetVaModel(config=snake_case)
        model.to(snake_case)
        model.eval()
        _UpperCAmelCase : Optional[int] =model(snake_case)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        self.parent.assertEqual(
            result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
    def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case) -> List[str]:
        """Image-classification head: check logits shape."""
        _UpperCAmelCase : Tuple =self.num_labels
        _UpperCAmelCase : Tuple =MobileNetVaForImageClassification(snake_case)
        model.to(snake_case)
        model.eval()
        _UpperCAmelCase : Any =model(snake_case , labels=snake_case)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case) -> List[Any]:
        """Semantic-segmentation head: check logits shape with/without labels."""
        _UpperCAmelCase : List[str] =self.num_labels
        _UpperCAmelCase : List[str] =MobileNetVaForSemanticSegmentation(snake_case)
        model.to(snake_case)
        model.eval()
        _UpperCAmelCase : List[str] =model(snake_case)
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        _UpperCAmelCase : int =model(snake_case , labels=snake_case)
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    def lowerCAmelCase ( self) -> Optional[int]:
        """Adapt prepare_config_and_inputs() output for the common tests."""
        _UpperCAmelCase : Optional[Any] =self.prepare_config_and_inputs()
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any =config_and_inputs
        _UpperCAmelCase : Optional[int] ={'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCAmelCase ,lowerCAmelCase ,unittest.TestCase ):
    """Model tests for MobileNetV2 (common checks + slow checkpoint load).

    NOTE(review): obfuscation damage, documented rather than repaired:
    both mixin bases are the undefined name `lowerCAmelCase` (presumably
    ModelTesterMixin and PipelineTesterMixin, imported above — confirm);
    the six class attributes below all rebind `UpperCAmelCase`, so only
    the last assignment survives; setUp references the undefined names
    `MobileNetVaModelTester` / `MobileNetVaConfigTester` and the undefined
    `snake_case`, and binds locals instead of `self.model_tester` /
    `self.config_tester`; and all methods share the name `lowerCAmelCase`.
    """
    UpperCAmelCase =(
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    UpperCAmelCase =(
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    UpperCAmelCase =False
    UpperCAmelCase =False
    UpperCAmelCase =False
    UpperCAmelCase =False
    def lowerCAmelCase ( self) -> str:
        """setUp: build the model tester and config tester."""
        _UpperCAmelCase : str =MobileNetVaModelTester(self)
        _UpperCAmelCase : int =MobileNetVaConfigTester(self , config_class=snake_case , has_text_modality=snake_case)
    def lowerCAmelCase ( self) -> int:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='MobileNetV2 does not use inputs_embeds')
    def lowerCAmelCase ( self) -> List[Any]:
        """Skipped: vision model has no inputs_embeds."""
        pass
    @unittest.skip(reason='MobileNetV2 does not support input and output embeddings')
    def lowerCAmelCase ( self) -> Optional[Any]:
        """Skipped: vision model has no token embeddings."""
        pass
    @unittest.skip(reason='MobileNetV2 does not output attentions')
    def lowerCAmelCase ( self) -> List[str]:
        """Skipped: architecture emits no attention maps."""
        pass
    def lowerCAmelCase ( self) -> Union[str, Any]:
        """forward() signature must start with `pixel_values`."""
        _UpperCAmelCase , _UpperCAmelCase : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCAmelCase : Dict =model_class(snake_case)
            _UpperCAmelCase : Dict =inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _UpperCAmelCase : Tuple =[*signature.parameters.keys()]
            _UpperCAmelCase : Any =['pixel_values']
            self.assertListEqual(arg_names[:1] , snake_case)
    def lowerCAmelCase ( self) -> Optional[Any]:
        """Base-model shape checks via the model tester."""
        _UpperCAmelCase : Optional[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case)
    def lowerCAmelCase ( self) -> str:
        """All 16 hidden states must be returned when requested."""
        def check_hidden_states_output(snake_case , snake_case , snake_case):
            _UpperCAmelCase : Optional[Any] =model_class(snake_case)
            model.to(snake_case)
            model.eval()
            with torch.no_grad():
                _UpperCAmelCase : Tuple =model(**self._prepare_for_class(snake_case , snake_case))
            _UpperCAmelCase : Dict =outputs.hidden_states
            _UpperCAmelCase : Any =1_6
            self.assertEqual(len(snake_case) , snake_case)
        _UpperCAmelCase , _UpperCAmelCase : int =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCAmelCase : Dict =True
            check_hidden_states_output(snake_case , snake_case , snake_case)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _UpperCAmelCase : Any =True
            check_hidden_states_output(snake_case , snake_case , snake_case)
    def lowerCAmelCase ( self) -> Union[str, Any]:
        """Image-classification head checks via the model tester."""
        _UpperCAmelCase : Tuple =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*snake_case)
    def lowerCAmelCase ( self) -> Dict:
        """Semantic-segmentation head checks via the model tester."""
        _UpperCAmelCase : List[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*snake_case)
    @slow
    def lowerCAmelCase ( self) -> str:
        """Slow: pretrained checkpoints must load successfully."""
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _UpperCAmelCase : Tuple =MobileNetVaModel.from_pretrained(snake_case)
            self.assertIsNotNone(snake_case)
def prepare_img():
    """Load the COCO fixture image used by the integration tests below.

    Renamed from the mangled identifier: the integration tests call
    ``prepare_img()``, and the opened image was bound to one name but
    returned under another (NameError).
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class __magic_name__(unittest.TestCase):
    """Slow integration tests for MobileNetV2 checkpoints.

    Fixes: locals were assigned to one mangled name and read under another
    (NameError on every line); the cached property is renamed so that
    ``self.default_image_processor`` resolves.
    """

    @cached_property
    def default_image_processor(self):
        # None when vision deps are missing; the @slow tests then skip upstream.
        return (
            MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224') if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        """Logits of the 1.0/224 classifier on the fixture image."""
        model = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits (1001 = 1000 ImageNet classes + background)
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        """Logits of the DeepLabV3+MobileNetV2 segmentation head."""
        model = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')
        model = model.to(torch_device)
        image_processor = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits: 21 PASCAL-VOC classes on a 65x65 map
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
| 242 |
'''simple docstring'''
from string import ascii_uppercase
# Map "10".."35" -> "A".."Z": digit symbols above 9 for bases > 10.
# NOTE(review): the converter below references ALPHABET_VALUES, not this
# name — this dict was presumably meant to be called ALPHABET_VALUES; confirm.
lowercase ={str(ord(c) - 55): c for c in ascii_uppercase}
def lowerCamelCase__(num: int, base: int) -> str:
    """Convert a non-negative decimal integer to its representation in ``base``.

    Digits greater than 9 are written as uppercase letters ('A'..'Z'),
    so bases 2 through 36 are supported.

    Fixes over the original: the two parameters shared one name
    (SyntaxError), every type check was ``isinstance(x, x)`` (TypeError at
    runtime), and the letter lookup used an undefined global; digits >= 10
    are now produced with ``chr(mod + 55)`` (10 -> 'A', ..., 35 -> 'Z').

    Raises:
        TypeError: if ``num`` is a float, or ``base`` is a str or float.
        ValueError: if ``num`` is negative, or ``base`` is not in [2, 36].
    """
    if isinstance(num, float):
        raise TypeError('int() can\'t convert non-string with explicit base')
    if num < 0:
        raise ValueError('parameter must be positive int')
    if isinstance(base, str):
        raise TypeError('\'str\' object cannot be interpreted as an integer')
    if isinstance(base, float):
        raise TypeError('\'float\' object cannot be interpreted as an integer')
    if base in (0, 1):
        raise ValueError('base must be >= 2')
    if base > 36:
        raise ValueError('base must be <= 36')
    digits = ''
    while True:
        num, mod = divmod(num, base)
        # Letters for digit values 10..35, decimal characters otherwise.
        digits += chr(mod + 55) if mod >= 10 else str(mod)
        if num == 0:
            # Digits were accumulated least-significant first.
            return digits[::-1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Round-trip check: converting to base `base` and parsing back with
    # int(..., base) must be lossless. The converter in this module is
    # named `lowerCamelCase__`; the original guard called an undefined
    # `decimal_to_any` (NameError).
    for base in range(2, 37):
        for num in range(1000):
            assert int(lowerCamelCase__(num, base), base) == num, (
                num,
                base,
                lowerCamelCase__(num, base),
                int(lowerCamelCase__(num, base), base),
            )
| 242 | 1 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

# The tokenizer class below reads these names; originally all four values
# were reassigned to one mangled identifier, leaving the real names undefined.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class _snake_case(PreTrainedTokenizerFast):
    """Fast GPT-NeoX-20B tokenizer (byte-level BPE), backed by `tokenizers`.

    Fixes over the original: duplicate ``a__`` parameter names (SyntaxError),
    locals written to ``snake_case_`` but read under other names (NameError),
    class attributes and both methods sharing one mangled name so later
    definitions shadowed earlier ones, and the undefined base class.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        # Rebuild the backend pre-tokenizer when the serialized
        # add_prefix_space flag disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend BPE model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into ids, appending EOS after each turn and
        keeping only the trailing ``model_max_length`` tokens."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length:]
        return input_ids
| 85 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
# Module-level logger, following the transformers convention.
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class snake_case__(DonutImageProcessor):
    """Deprecated alias of DonutImageProcessor, kept for backward compatibility.

    Fixes: the warning category was the ``*args`` tuple (TypeError at call
    time) instead of ``FutureWarning``, and the base class name was undefined.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 107 | 0 |
"""simple docstring"""
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)

# Checkpoint and mixed-precision identifiers shared by the test classes below.
# Originally all four values were reassigned to a single mangled name, leaving
# BERT_BASE_CASED/FPaa/BFaa/dtypes (which the tests read) undefined.
BERT_BASE_CASED = "bert-base-cased"
FPaa = "fp16"
BFaa = "bf16"
dtypes = [FPaa, BFaa]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    """Unit tests: FSDP plugin settings parsed from environment variables.

    Fixes over the original: every method shared one mangled name (so only
    the last survived and ``setUp`` never ran), env dicts were never actually
    populated (assignments went to throwaway locals), the base class was an
    undefined name, and ``rank0_only`` was misspelled.
    """

    def setUp(self):
        super().setUp()
        # Minimal single-process distributed environment so FSDP plugins
        # can be constructed without a real launcher.
        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )

    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            # ShardingStrategy enum values are 1-indexed.
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))

    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))

    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)

    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        # A transformer class that is not in the model must raise.
        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        # min_num_params == 0 disables size-based wrapping.
        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)

    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FPaa:
                    # fp16 requires a sharded gradient scaler.
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BFaa:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)

    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    """End-to-end FSDP runs launched via ``accelerate launch`` (multi-GPU, slow).

    Fixes over the original: every method shared one mangled name (only the
    last was discoverable and ``setUp`` never ran), instance attributes were
    written to throwaway locals, and the base class name was undefined.
    """

    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])

    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break
            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")
            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")
            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break
            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")
            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            # Only FULL_SHARD is exercised for checkpointing.
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                # Reset the command to just before the state-dict flag.
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
                # Second launch resumes from the checkpoint just written.
                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])
            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in spec:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break
            if "cpu_offload" in spec:
                cmd_config.append("--fsdp_offload_params=True")
            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in spec:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break
            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")
            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
| 365 | """simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    """Builds BertGeneration configs/inputs and runs shared model checks.

    Fixes over the original: all ``__init__`` parameters shared one name
    (SyntaxError), locals were written to mangled names and read under
    others (NameError), and the class is renamed so the test class's
    reference to ``BertGenerationEncoderTester`` resolves.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/mask/labels plus a small config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        """Decoder variant: adds encoder hidden states and attention mask."""
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        """Cached decoding must match a fresh forward pass on the same inputs."""
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()
        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/generation/pipeline tests for BertGeneration.

    Fixes over the original: the base mixins were an undefined mangled name,
    the three class attributes shared one name (shadowing), all methods
    shared one name (only the last was discoverable), and locals were read
    under names never assigned.
    """

    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        # Same checks with the config's model_type forced to "bert".
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # Decoder checks must also pass when no attention mask is provided.
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    """Slow integration test pinning the encoder's output on a fixed input."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    """Slow integration test pinning the decoder's LM logits on a fixed input."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        # 50358 is the seq-generation vocab size.
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 321 | 0 |
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# The tokenizer tests below read these names; originally both values were
# assigned to a single mangled identifier, leaving them undefined.
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class __a (__snake_case , unittest.TestCase ):
__a : Any = BigBirdTokenizer
__a : Any = BigBirdTokenizerFast
__a : List[str] = True
__a : Union[str, Any] = True
def setUp(self):
    """Build a tokenizer from the fixture sentencepiece model and persist it."""
    super().setUp()
    tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
    tokenizer.save_pretrained(self.tmpdirname)
def test_convert_token_and_id(self):
    """`<s>` must map to id 1 and back."""
    token = "<s>"
    token_id = 1
    self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
    self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def test_get_vocab(self):
    """First/last vocab entries and total size including added tokens."""
    vocab_keys = list(self.get_tokenizer().get_vocab().keys())
    self.assertEqual(vocab_keys[0], "<unk>")
    self.assertEqual(vocab_keys[1], "<s>")
    self.assertEqual(vocab_keys[-1], "[MASK]")
    self.assertEqual(len(vocab_keys), 1_004)
def test_vocab_size(self):
    """Base vocab size of the fixture model (excludes added special tokens)."""
    self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
def test_rust_and_python_full_tokenizers(self):
    """Slow and fast tokenizers must agree on tokens and ids."""
    if not self.test_rust_tokenizer:
        return
    tokenizer = self.get_tokenizer()
    rust_tokenizer = self.get_rust_tokenizer()
    sequence = "I was born in 92000, and this is falsé."
    tokens = tokenizer.tokenize(sequence)
    rust_tokens = rust_tokenizer.tokenize(sequence)
    self.assertListEqual(tokens, rust_tokens)
    ids = tokenizer.encode(sequence, add_special_tokens=False)
    rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
    self.assertListEqual(ids, rust_ids)
    rust_tokenizer = self.get_rust_tokenizer()
    ids = tokenizer.encode(sequence)
    rust_ids = rust_tokenizer.encode(sequence)
    self.assertListEqual(ids, rust_ids)
def test_full_tokenizer(self):
    """Tokenize/encode/decode round trip against the fixture vocab; id 0 is
    the unknown token, so '9' and 'é' come back as <unk>."""
    tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
    tokens = tokenizer.tokenize("This is a test")
    self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
    self.assertListEqual(
        tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382],
    )
    tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
    self.assertListEqual(
        tokens,
        [
            SPIECE_UNDERLINE + "I",
            SPIECE_UNDERLINE + "was",
            SPIECE_UNDERLINE + "b",
            "or",
            "n",
            SPIECE_UNDERLINE + "in",
            SPIECE_UNDERLINE + "",
            "9",
            "2",
            "0",
            "0",
            "0",
            ",",
            SPIECE_UNDERLINE + "and",
            SPIECE_UNDERLINE + "this",
            SPIECE_UNDERLINE + "is",
            SPIECE_UNDERLINE + "f",
            "al",
            "s",
            "é",
            ".",
        ],
    )
    ids = tokenizer.convert_tokens_to_ids(tokens)
    self.assertListEqual(
        ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
    )
    back_tokens = tokenizer.convert_ids_to_tokens(ids)
    self.assertListEqual(
        back_tokens,
        [
            SPIECE_UNDERLINE + "I",
            SPIECE_UNDERLINE + "was",
            SPIECE_UNDERLINE + "b",
            "or",
            "n",
            SPIECE_UNDERLINE + "in",
            SPIECE_UNDERLINE + "",
            "<unk>",
            "2",
            "0",
            "0",
            "0",
            ",",
            SPIECE_UNDERLINE + "and",
            SPIECE_UNDERLINE + "this",
            SPIECE_UNDERLINE + "is",
            SPIECE_UNDERLINE + "f",
            "al",
            "s",
            "<unk>",
            ".",
        ],
    )
    @cached_property
    def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[Any]:
        """Pretrained BigBird tokenizer shared by the slow integration tests below (computed once, then cached)."""
        return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = '''Hello World!'''
UpperCAmelCase_ : List[Any] = [65, 1_85_36, 22_60, 1_01, 66]
self.assertListEqual(a_ , self.big_tokenizer.encode(a_ ) )
@slow
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
UpperCAmelCase_ : Optional[int] = [65, 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, 66] # noqa: E231
# fmt: on
self.assertListEqual(a_ , self.big_tokenizer.encode(a_ ) )
@require_torch
@slow
def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCAmelCase_ : str = list(self.big_tokenizer.get_vocab().keys() )[:10]
UpperCAmelCase_ : Tuple = ''' '''.join(a_ )
UpperCAmelCase_ : Tuple = self.big_tokenizer.encode_plus(a_ , return_tensors='''pt''' , return_token_type_ids=a_ )
UpperCAmelCase_ : List[Any] = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=a_ )
UpperCAmelCase_ : Optional[int] = BigBirdConfig(attention_type='''original_full''' )
UpperCAmelCase_ : str = BigBirdModel(a_ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**a_ )
model(**a_ )
@slow
def UpperCAmelCase__ ( self : Tuple ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
UpperCAmelCase_ : Any = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def UpperCAmelCase__ ( self : Tuple ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Tuple = {'''input_ids''': [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 125 |
"""simple docstring"""
def lowercase ( a : int , b : int ) ->str:
    """Return the bitwise OR of two non-negative ints as a binary string.

    The result keeps the ``"0b"`` prefix, e.g. ``lowercase(25, 32) == '0b111001'``.

    Bug fix: both parameters were declared under the single name
    ``_snake_case`` (a SyntaxError) while the body read the undefined names
    ``a``/``b``/``a_binary``/``b_binary``.

    Raises:
        ValueError: if either input is negative.
    """
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) , len(b_binary ) )
    # A result bit is '1' whenever at least one of the aligned bits is '1'.
    return "0b" + "".join(
        str(int('''1''' in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
    # Run any doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 102 | 0 |
'''simple docstring'''
def UpperCamelCase ( a ) -> list:
    """Return every permutation of *a* using Heap's iterative algorithm.

    Bug fix: the inner helper was declared as ``generate(a, a)`` (duplicate
    parameter names, a SyntaxError) and its body read the undefined names
    ``c``/``arr``.

    Args:
        a: a mutable sequence; it is permuted in place while generating.

    Returns:
        A list of tuples, one per permutation of ``a`` (n! entries).
    """
    if len(a ) <= 1:
        return [tuple(a )]
    res = []

    def generate(n , arr ):
        # c[i] counts how many swaps have been performed at position i.
        c = [0] * n
        res.append(tuple(arr ) )
        i = 0
        while i < n:
            if c[i] < i:
                # The swap partner depends on the parity of i (Heap's rule).
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr ) )
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(a ) , a )
    return res
if __name__ == "__main__":
    # Bug fix: the original read the undefined names `user_input`, `arr`
    # and `heaps`; read a comma-separated int list and print its permutations.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(UpperCamelCase(arr))
| 98 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
_lowerCAmelCase = logging.get_logger(__name__)
# NOTE(review): the generator rebinds the single name `_lowerCAmelCase` for
# every module constant below; each binding was a distinct constant originally.
# Checkpoint name -> hosted config URL.
_lowerCAmelCase = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
# Token ids suppressed during generation (multilingual checkpoints).
_lowerCAmelCase = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
    1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
    4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
    11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
    17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
    34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
# Token ids suppressed during generation (English-only checkpoints).
_lowerCAmelCase = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
    3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
    7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
    14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
    22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
    42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class _SCREAMING_SNAKE_CASE ( __a ):
    """Configuration holding the Whisper model's architecture hyper-parameters."""

    __SCREAMING_SNAKE_CASE :str = """whisper"""
    __SCREAMING_SNAKE_CASE :str = ["""past_key_values"""]
    __SCREAMING_SNAKE_CASE :Tuple = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__(
        self ,
        vocab_size=5_1865 ,
        num_mel_bins=80 ,
        encoder_layers=6 ,
        encoder_attention_heads=4 ,
        decoder_layers=6 ,
        decoder_attention_heads=4 ,
        decoder_ffn_dim=1536 ,
        encoder_ffn_dim=1536 ,
        encoder_layerdrop=0.0 ,
        decoder_layerdrop=0.0 ,
        decoder_start_token_id=5_0257 ,
        use_cache=True ,
        is_encoder_decoder=True ,
        activation_function="gelu" ,
        d_model=256 ,
        dropout=0.0 ,
        attention_dropout=0.0 ,
        activation_dropout=0.0 ,
        init_std=0.02 ,
        scale_embedding=False ,
        max_source_positions=1500 ,
        max_target_positions=448 ,
        pad_token_id=5_0256 ,
        bos_token_id=5_0256 ,
        eos_token_id=5_0256 ,
        suppress_tokens=None ,
        begin_suppress_tokens=[220, 5_0256] ,
        use_weighted_layer_sum=False ,
        classifier_proj_size=256 ,
        apply_spec_augment=False ,
        mask_time_prob=0.05 ,
        mask_time_length=10 ,
        mask_time_min_masks=2 ,
        mask_feature_prob=0.0 ,
        mask_feature_length=10 ,
        mask_feature_min_masks=0 ,
        median_filter_width=7 ,
        **kwargs ,
    ):
        """Store the architecture hyper-parameters and forward generation/token
        defaults to the base config class.

        Bug fix: the generated code declared every parameter under the single
        name ``a__`` (a SyntaxError) and assigned every value to one throwaway
        local, so no instance attribute was ever set. Parameter names and
        order are restored from the defaults, which are unchanged.
        """
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , suppress_tokens=suppress_tokens , begin_suppress_tokens=begin_suppress_tokens , **kwargs , )
class _SCREAMING_SNAKE_CASE ( __a ):
    """ONNX export configuration for Whisper (encoder-decoder, optional KV cache).

    NOTE(review): the generator emitted every member under the single name
    ``snake_case__``; the original names (inputs / generate_dummy_inputs /
    atol_for_validation) must be restored before this class is usable.
    """

    @property
    def snake_case__ ( self : List[str] ):
        """Dynamic-axis spec for the exported inputs; decoder axes depend on use_past."""
        common_inputs = OrderedDict(
            [
                ('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
            ] )
        if self.use_past:
            # With cached keys/values the decoder only consumes one new token.
            common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
        else:
            common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
        return common_inputs

    def snake_case__ ( self : Optional[int] , preprocessor : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional["TensorType"] = None , sampling_rate : int = 2_2050 , time_duration : float = 5.0 , frequency : int = 220 , ):
        """Build dummy audio features plus decoder ids for the ONNX export trace.

        Bug fix: the generated code declared every parameter under the single
        name ``a__`` (a SyntaxError) and bound every intermediate to one
        throwaway local while later lines read the undefined names
        ``encoder_inputs``/``decoder_inputs``/``dummy_inputs``.
        """
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self , preprocessor=preprocessor.feature_extractor , batch_size=batch_size , framework=framework , sampling_rate=sampling_rate , time_duration=time_duration , frequency=frequency , )
        encoder_sequence_length = encoder_inputs['''input_features'''].shape[2]
        # When the cache is exported, the decoder sees half the encoder length.
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer , batch_size , seq_length , is_pair , framework )
        dummy_inputs['''input_features'''] = encoder_inputs.pop('''input_features''' )
        dummy_inputs['''decoder_input_ids'''] = decoder_inputs.pop('''decoder_input_ids''' )
        if "past_key_values" in decoder_inputs:
            dummy_inputs['''past_key_values'''] = decoder_inputs.pop('''past_key_values''' )
        return dummy_inputs

    @property
    def snake_case__ ( self : Dict ):
        """Absolute tolerance used when validating the exported model's outputs."""
        return 1E-3
| 98 | 1 |
"""simple docstring"""
from collections.abc import Callable
def _snake_case ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) -> float:
'''simple docstring'''
_A = a
_A = b
if function(SCREAMING_SNAKE_CASE_ ) == 0: # one of the a or b is a root for the function
return a
elif function(SCREAMING_SNAKE_CASE_ ) == 0:
return b
elif (
function(SCREAMING_SNAKE_CASE_ ) * function(SCREAMING_SNAKE_CASE_ ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError('could not find root in given interval.' )
else:
_A = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(SCREAMING_SNAKE_CASE_ ) == 0:
return mid
elif function(SCREAMING_SNAKE_CASE_ ) * function(SCREAMING_SNAKE_CASE_ ) < 0:
_A = mid
else:
_A = mid
_A = start + (end - start) / 2.0
return mid
def _snake_case ( _snake_case : float ) -> float:
'''simple docstring'''
return x**3 - 2 * x - 5
if __name__ == "__main__":
    # NOTE(review): `bisection` and `f` are never defined in this module (both
    # functions above were generated under the single name `_snake_case`, the
    # second shadowing the first), so running this script raises NameError.
    print(bisection(f, 1, 1_000))
    import doctest
    doctest.testmod()
| 315 |
import string
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str ) -> None:
    """Brute-force a Caesar cipher: print the message decrypted with every key.

    Only uppercase A-Z characters are shifted; every other character passes
    through unchanged.

    Bug fix: the body read the undefined names ``message``/``translated`` and
    called ``find`` on the whole message instead of the current symbol.

    Args:
        SCREAMING_SNAKE_CASE_: the (uppercase) ciphertext to decrypt.
    """
    for key in range(len(string.ascii_uppercase ) ):
        translated = ""
        for symbol in SCREAMING_SNAKE_CASE_:
            if symbol in string.ascii_uppercase:
                # Shift the letter back by `key`, wrapping around the alphabet.
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F'Decryption using Key #{key}: {translated}' )
def lowerCAmelCase__ ( ) -> None:
    '''Script entry point: read a ciphertext from stdin and brute-force decrypt it.'''
    # NOTE(review): `message`, `SCREAMING_SNAKE_CASE_` and `decrypt` are all
    # undefined here (the decrypt helper above shares this function's name),
    # so this raises NameError at runtime; the intended flow is
    # decrypt(input("Encrypted message: ").upper()).
    A__ = input("Encrypted message: " )
    A__ = message.upper()
    decrypt(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `main` is not defined in this module (the entry point
    # above was generated as `lowerCAmelCase__`), so this call raises NameError.
    main()
| 68 | 0 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE_ ( __a ):
    """Processor pairing an auto image processor with an auto tokenizer.

    NOTE(review): this generated class is not runnable as-is. Several defs
    reuse one parameter name (``*lowerCAmelCase__ , **lowerCAmelCase__`` is a
    SyntaxError), most locals were collapsed into the single throwaway name
    ``__SCREAMING_SNAKE_CASE`` leaving reads of undefined names
    (``kwargs``, ``feature_extractor``, ``images``, ``text``, ``args``,
    ``inputs``, ``encodings``, ``key``, ``end_token``, ``content``, ``value``,
    ``leaf``, ``output``, ``tokens``, ``added_vocab``), and the decode /
    token-parsing members all share the name ``snake_case_`` (each def
    shadows the previous). The recursive call target ``self.tokenajson`` is
    also never defined. Restore the original method and variable names
    before use.
    """
    __lowercase : Optional[int] = ['''image_processor''', '''tokenizer''']
    __lowercase : List[Any] = '''AutoImageProcessor'''
    __lowercase : List[str] = '''AutoTokenizer'''
    def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__):
        """Wire up the image-processor/tokenizer pair; `feature_extractor` kwarg is deprecated."""
        __SCREAMING_SNAKE_CASE = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , lowerCAmelCase__ , )
            __SCREAMING_SNAKE_CASE = kwargs.pop("""feature_extractor""")
        __SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""")
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""")
        super().__init__(lowerCAmelCase__ , lowerCAmelCase__)
        __SCREAMING_SNAKE_CASE = self.image_processor
        __SCREAMING_SNAKE_CASE = False
    def __call__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
        """Route images to the image processor and text to the tokenizer."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*lowerCAmelCase__ , **lowerCAmelCase__)
        __SCREAMING_SNAKE_CASE = kwargs.pop("""images""" , lowerCAmelCase__)
        __SCREAMING_SNAKE_CASE = kwargs.pop("""text""" , lowerCAmelCase__)
        if len(lowerCAmelCase__) > 0:
            __SCREAMING_SNAKE_CASE = args[0]
            __SCREAMING_SNAKE_CASE = args[1:]
        if images is None and text is None:
            raise ValueError("""You need to specify either an `images` or `text` input to process.""")
        if images is not None:
            __SCREAMING_SNAKE_CASE = self.image_processor(lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__)
        if text is not None:
            __SCREAMING_SNAKE_CASE = self.tokenizer(lowerCAmelCase__ , **lowerCAmelCase__)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            __SCREAMING_SNAKE_CASE = encodings["""input_ids"""]
            return inputs
    def snake_case_ ( self , *lowerCAmelCase__ , **lowerCAmelCase__):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__)
    def snake_case_ ( self , *lowerCAmelCase__ , **lowerCAmelCase__):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__)
    @contextmanager
    def snake_case_ ( self):
        """Deprecated: temporarily make __call__ target the tokenizer (label processing)."""
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your images inputs, or in a separate call.""")
        __SCREAMING_SNAKE_CASE = True
        __SCREAMING_SNAKE_CASE = self.tokenizer
        yield
        __SCREAMING_SNAKE_CASE = self.image_processor
        __SCREAMING_SNAKE_CASE = False
    def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=False , lowerCAmelCase__=None):
        """Parse a `<s_key>...</s_key>` token stream into nested dicts/lists (Donut-style)."""
        if added_vocab is None:
            __SCREAMING_SNAKE_CASE = self.tokenizer.get_added_vocab()
        __SCREAMING_SNAKE_CASE = {}
        while tokens:
            __SCREAMING_SNAKE_CASE = re.search(R"""<s_(.*?)>""" , lowerCAmelCase__ , re.IGNORECASE)
            if start_token is None:
                break
            __SCREAMING_SNAKE_CASE = start_token.group(1)
            __SCREAMING_SNAKE_CASE = re.search(Rf"</s_{key}>" , lowerCAmelCase__ , re.IGNORECASE)
            __SCREAMING_SNAKE_CASE = start_token.group()
            if end_token is None:
                # Unterminated key: drop the opening tag and continue.
                __SCREAMING_SNAKE_CASE = tokens.replace(lowerCAmelCase__ , """""")
            else:
                __SCREAMING_SNAKE_CASE = end_token.group()
                __SCREAMING_SNAKE_CASE = re.escape(lowerCAmelCase__)
                __SCREAMING_SNAKE_CASE = re.escape(lowerCAmelCase__)
                __SCREAMING_SNAKE_CASE = re.search(f"{start_token_escaped}(.*?){end_token_escaped}" , lowerCAmelCase__ , re.IGNORECASE)
                if content is not None:
                    __SCREAMING_SNAKE_CASE = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        __SCREAMING_SNAKE_CASE = self.tokenajson(lowerCAmelCase__ , is_inner_value=lowerCAmelCase__ , added_vocab=lowerCAmelCase__)
                        if value:
                            if len(lowerCAmelCase__) == 1:
                                __SCREAMING_SNAKE_CASE = value[0]
                            __SCREAMING_SNAKE_CASE = value
                    else:  # leaf nodes
                        __SCREAMING_SNAKE_CASE = []
                        for leaf in content.split(R"""<sep/>"""):
                            __SCREAMING_SNAKE_CASE = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                __SCREAMING_SNAKE_CASE = leaf[1:-2]  # for categorical special tokens
                            output[key].append(lowerCAmelCase__)
                        if len(output[key]) == 1:
                            __SCREAMING_SNAKE_CASE = output[key][0]
                __SCREAMING_SNAKE_CASE = tokens[tokens.find(lowerCAmelCase__) + len(lowerCAmelCase__) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:] , is_inner_value=lowerCAmelCase__ , added_vocab=lowerCAmelCase__)
        if len(lowerCAmelCase__):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def snake_case_ ( self):
        """Deprecated alias of image_processor_class."""
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowerCAmelCase__ , )
        return self.image_processor_class
    @property
    def snake_case_ ( self):
        """Deprecated alias of image_processor."""
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowerCAmelCase__ , )
        return self.image_processor
| 354 |
"""simple docstring"""
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
while b:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = b, a % b
return a
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
return a if b == 0 else euclidean_gcd_recursive(UpperCamelCase_ , a % b )
def _lowerCAmelCase ( ):
    """Demo: print gcd results for a few sample pairs.

    NOTE(review): `euclidean_gcd` / `euclidean_gcd_recursive` are never
    defined in this module (both helpers above were generated under the
    single name `_lowerCAmelCase`, each def shadowing the previous), and
    `main` in the guard below is undefined too -- running this script
    raises NameError.
    """
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
    main()
| 255 | 0 |
from __future__ import annotations
import queue
class __snake_case :
def __init__( self : Dict , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = data
UpperCAmelCase_ = None
UpperCAmelCase_ = None
def A () -> TreeNode:
    """Interactively build a binary tree, level by level, from stdin.

    Reads the root value, then for each queued node asks for its left and
    right children; answering "n" stops input and returns the tree built so
    far.

    Bug fix: the generated code bound every node to a throwaway local while
    later lines read the undefined names ``tree_node``/``node_found``/
    ``left_node``/``right_node``/``TreeNode``; the node class in this module
    is ``__snake_case``.
    """
    print('''\n********Press N to stop entering at any point of time********\n''' )
    check = input('''Enter the value of the root node: ''' ).strip().lower()
    q = queue.Queue()
    tree_node = __snake_case(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = F"""Enter the left node of {node_found.data}: """
        check = input(msg ).strip().lower() or '''n'''
        if check == "n":
            return tree_node
        left_node = __snake_case(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = F"""Enter the right node of {node_found.data}: """
        check = input(msg ).strip().lower() or '''n'''
        if check == "n":
            return tree_node
        right_node = __snake_case(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    # Unreachable for well-formed input: the queue only empties after a "n".
    raise
def A (__A : TreeNode ) -> None:
    """Print a pre-order (root, left, right) traversal, comma separated.

    Bug fix: the generated body called ``isinstance(__A, __A)`` and read the
    undefined names ``node``/``pre_order``; the node class in this module is
    ``__snake_case``.
    NOTE(review): every traversal in this module was generated under the
    single name `A`, so each def shadows the previous at module scope.
    """
    if not isinstance(__A , __snake_case ) or not __A:
        return
    print(__A.data , end=''',''' )
    A(__A.left )
    A(__A.right )
def A (__A : TreeNode ) -> None:
    """Print an in-order (left, root, right) traversal, comma separated.

    Bug fix: the generated body called ``isinstance(__A, __A)`` and read the
    undefined names ``node``/``in_order``; the node class in this module is
    ``__snake_case``.
    """
    if not isinstance(__A , __snake_case ) or not __A:
        return
    A(__A.left )
    print(__A.data , end=''',''' )
    A(__A.right )
def A (__A : TreeNode ) -> None:
    """Print a post-order (left, right, root) traversal, comma separated.

    Bug fix: the generated body called ``isinstance(__A, __A)`` and read the
    undefined names ``node``/``post_order``; the node class in this module is
    ``__snake_case``.
    """
    if not isinstance(__A , __snake_case ) or not __A:
        return
    A(__A.left )
    A(__A.right )
    print(__A.data , end=''',''' )
def A (__A : TreeNode ) -> None:
    """Print a breadth-first (level-order) traversal, comma separated.

    Bug fix: the generated body bound the queue and dequeued node to a
    throwaway local while later lines read the undefined names
    ``q``/``node_dequeued``.
    """
    if not isinstance(__A , __snake_case ) or not __A:
        return
    q = queue.Queue()
    q.put(__A )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data , end=''',''' )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def A (__A : TreeNode ) -> None:
    """Level-order traversal printing each tree level on its own line.

    Bug fix: the generated body read the undefined names
    ``q``/``list_``/``node_dequeued`` and re-queued the root (``q.put(__A)``)
    instead of the collected children.
    """
    if not isinstance(__A , __snake_case ) or not __A:
        return
    q = queue.Queue()
    q.put(__A )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end=''',''' )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node )
def A (__A : TreeNode ) -> None:
    """Iterative pre-order traversal using an explicit stack.

    Bug fix: the generated body read the undefined names ``stack``/``n`` and
    pushed the root (``stack.append(__A)``) instead of the current node.
    """
    if not isinstance(__A , __snake_case ) or not __A:
        return
    stack = []
    n = __A
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end=''',''' )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def A (__A : TreeNode ) -> None:
    """Iterative in-order traversal using an explicit stack.

    Bug fix: the generated body read the undefined names ``stack``/``n`` and
    pushed the root (``stack.append(__A)``) instead of the current node.
    """
    if not isinstance(__A , __snake_case ) or not __A:
        return
    stack = []
    n = __A
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end=''',''' )
        n = n.right
def A (__A : TreeNode ) -> None:
    """Iterative post-order traversal using two stacks.

    Bug fix: the generated body collapsed both stacks into the single name
    ``stacka`` and read the undefined name ``n``; with one shared stack the
    reversed-post-order staging never terminates correctly.
    """
    if not isinstance(__A , __snake_case ) or not __A:
        return
    stack_in, stack_out = [], []
    n = __A
    stack_in.append(n )
    while stack_in:  # to find the reversed order of post order, store it in stack2
        n = stack_in.pop()
        if n.left:
            stack_in.append(n.left )
        if n.right:
            stack_in.append(n.right )
        stack_out.append(n )
    while stack_out:  # pop up from stack2 will be the post order
        print(stack_out.pop().data , end=''',''' )
def A (s : str = "" , width : int = 50 , char : str = "*" ) -> str:
    """Return *s* centered in a banner of `width` `char`s (a bare rule if *s* is empty).

    Bug fix: all three parameters were declared under the single name
    ``__A`` (a SyntaxError) while the body read the undefined names
    ``s``/``width``/``char``/``left``/``extra``.
    """
    if not s:
        return "\n" + width * char
    # Two columns are reserved for the spaces around *s*; any odd remainder
    # goes on the right side.
    left, extra = divmod(width - len(s ) - 2 , 2 )
    return F"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
    # NOTE(review): every traversal above was generated under the single name
    # `A` (each def shadows the previous), and `prompt`, `build_tree`,
    # `pre_order`, `in_order`, `post_order`, `level_order`,
    # `level_order_actual`, the `*_iter` variants and `node` are never defined
    # in this module -- running this script raises NameError.
    import doctest
    doctest.testmod()
    print(prompt("Binary Tree Traversals"))
    snake_case_ : TreeNode = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")
    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")
    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")
    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")
    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")
    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")
    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")
    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
| 51 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
# NOTE(review): the generator rebinds the single name `UpperCAmelCase__` for
# each constant below; originally these were distinct module constants.
# File name of the sentencepiece model inside a checkpoint directory.
UpperCAmelCase__ = {'vocab_file': 'sentencepiece.model'}
# Hosted vocab file per pretrained checkpoint.
UpperCAmelCase__ = {
    'vocab_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
    },
}
# Maximum input length (in tokens) per pretrained checkpoint.
UpperCAmelCase__ = {
    'google/rembert': 256,
}
class lowerCAmelCase__ ( A_ ):
    """SentencePiece-backed RemBERT tokenizer.

    NOTE(review): every method below was generated under the single name
    ``lowercase``, so later defs shadow earlier ones at class-creation time;
    the original method names must be restored before this class is usable.
    """

    __a = VOCAB_FILES_NAMES
    __a = PRETRAINED_VOCAB_FILES_MAP
    __a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=True , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        """Load the sentencepiece model and record the normalization options.

        Bug fix: the generated code declared every parameter under variants
        of one name and assigned every value to a throwaway local, so the
        instance attributes (and the sp_model) were never initialised.
        """
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file )

    @property
    def lowercase ( self ):
        """Size of the sentencepiece vocabulary."""
        return len(self.sp_model )

    def lowercase ( self ):
        """Return the token->id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        """Drop the unpicklable sentencepiece handle when pickling."""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__( self , d ):
        """Restore state and reload the sentencepiece model from disk."""
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file )

    def lowercase ( self , text , sample=False ):
        """Tokenize `text` into sentencepiece pieces."""
        pieces = self.sp_model.EncodeAsPieces(text )
        return pieces

    def lowercase ( self , token ):
        """Convert a token (str) into its vocabulary id."""
        return self.sp_model.PieceToId(token )

    def lowercase ( self , index ):
        """Convert a vocabulary id into its token (str)."""
        return self.sp_model.IdToPiece(index )

    def lowercase ( self , tokens ):
        """Join sentencepiece pieces back into a plain string."""
        out_string = self.sp_model.decode_pieces(tokens )
        return out_string

    def lowercase ( self , token_ids_a , token_ids_a_pair = None ):
        """Build model input with specials: [CLS] A [SEP] (B [SEP])."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a_pair + sep

    def lowercase ( self , token_ids_a , token_ids_a_pair = None , already_has_special_tokens = False ):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_a_pair is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a_pair is not None:
            return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_a_pair )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1]

    def lowercase ( self , token_ids_a , token_ids_a_pair = None ):
        """Token type ids: 0 for sequence A (incl. specials), 1 for sequence B."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a_pair + sep ) * [1]

    def lowercase ( self , save_directory , filename_prefix = None ):
        """Copy the sentencepiece model file into `save_directory`; return its path."""
        if not os.path.isdir(save_directory ):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 288 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
# NOTE(review): the generator rebinds the single name `_lowercase` for each
# constant below; originally these were distinct module constants.
# Fairseq checkpoints this script knows how to convert.
_lowercase = ["""bart.large""", """bart.large.mnli""", """bart.large.cnn""", """bart_xsum/model.pt"""]
# Checkpoint name -> target HF model class (others fall back to generation).
_lowercase = {"""bart.large""": BartModel, """bart.large.mnli""": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("""0.9.0"""):
    raise Exception("""requires fairseq >= 0.9.0""")
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
# Sample sentence used to compare fairseq and HF tokenizations.
_lowercase = """ Hello world! cécé herlolip"""
# Classification-head weight renames for the MNLI checkpoint.
_lowercase = [
    ("""model.classification_heads.mnli.dense.weight""", """classification_head.dense.weight"""),
    ("""model.classification_heads.mnli.dense.bias""", """classification_head.dense.bias"""),
    ("""model.classification_heads.mnli.out_proj.weight""", """classification_head.out_proj.weight"""),
    ("""model.classification_heads.mnli.out_proj.bias""", """classification_head.out_proj.bias"""),
]
def A (__lowerCamelCase :List[Any] ):
_lowerCAmelCase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
]
for k in ignore_keys:
state_dict.pop(__lowerCamelCase , __lowerCamelCase )
def A (dct , old_key , new_key ):
    """Move ``dct[old_key]`` to ``dct[new_key]`` (in place).

    Bug fix: the generated code declared all three parameters under the
    single name ``__lowerCamelCase`` (a SyntaxError) and discarded the
    popped value instead of storing it under the new key.
    """
    val = dct.pop(old_key )
    dct[new_key] = val
def A (__lowerCamelCase :str ):
    """Load a local fairseq checkpoint into the hub's bart.large.cnn interface.

    Bug fix: both results were bound to a throwaway local while the
    following lines read the undefined names ``sd``/``hub_interface``.

    Args:
        __lowerCamelCase: path to a ``torch.load``-able checkpoint file.

    Returns:
        The fairseq hub interface (eval mode) with the checkpoint's weights.
    """
    sd = torch.load(__lowerCamelCase , map_location="""cpu""" )
    hub_interface = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval()
    hub_interface.model.load_state_dict(sd["""model"""] )
    return hub_interface
def A (__lowerCamelCase :Any ):
_lowerCAmelCase , _lowerCAmelCase = emb.weight.shape
_lowerCAmelCase = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_lowerCAmelCase = emb.weight.data
return lin_layer
@torch.no_grad()
def A (checkpoint_path , pytorch_dump_folder_path , hf_checkpoint_name=None ):
    """Convert a fairseq BART checkpoint into a transformers checkpoint.

    Loads the fairseq model (from the hub or a local path), remaps its
    state dict, verifies that the fairseq model and the converted
    transformers model tokenize and score a sample sentence identically,
    then saves the converted model.

    Args:
        checkpoint_path: a fairseq hub name ("bart.large", "bart.large.mnli",
            "bart.large.cnn") or a path to a local ``model.pt``.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
        hf_checkpoint_name: huggingface config/tokenizer name; derived from
            ``checkpoint_path`` ("." -> "-") when omitted.

    Raises:
        ValueError: if tokenizer outputs or model outputs disagree.
    """
    # sample sentence used to compare the two models' outputs
    sample_text = " Hello world! cécé herlolip"
    if not os.path.exists(checkpoint_path ):
        bart = torch.hub.load("pytorch/fairseq" , checkpoint_path ).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path )
    bart.model.upgrade_state_dict(bart.model.state_dict() )
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace("." , "-" )
    config = BartConfig.from_pretrained(hf_checkpoint_name )
    tokens = bart.encode(sample_text ).unsqueeze(0 )
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name ).encode(sample_text , return_tensors="pt" ).unsqueeze(0 )
    if not torch.eq(tokens , tokens2 ).all():
        raise ValueError(
            f'converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}' )
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict )
        # fairseq keeps the shared embedding matrix under the decoder
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict , src , dest )
        model = BartForSequenceClassification(config ).eval()
        model.load_state_dict(state_dict )
        fairseq_output = bart.predict("mnli" , tokens , return_logits=True )
        new_model_outputs = model(tokens )[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens )
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config ).eval()
            model.load_state_dict(state_dict )
            new_model_outputs = model(tokens ).model[0]
        else:
            model = BartForConditionalGeneration(config ).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict )
            if hasattr(model , "lm_head" ):
                model.lm_head = make_linear_from_emb(model.model.shared )
            new_model_outputs = model.model(tokens )[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f'`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}' )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
    )
    parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument(
        """--hf_config""", default=None, type=str, help="""Which huggingface architecture to use: bart-large-xsum"""
    )
    args = parser.parse_args()
    # the converter above is defined under the mangled name ``A``
    A(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 229 |
'''simple docstring'''
import os
def A ():
    """Project Euler 11: greatest product of four adjacent numbers.

    Reads a 20x20 grid of integers from ``grid.txt`` next to this file and
    scans every horizontal, vertical and diagonal run of four.

    Returns:
        int: the maximum product found.
    """
    with open(os.path.dirname(__file__) + """/grid.txt""" ) as f:
        grid = []
        for _ in range(20 ):
            grid.append([int(x) for x in f.readline().split()] )
    maximum = 0
    # right
    for i in range(20 ):
        for j in range(17 ):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17 ):
        for j in range(20 ):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal, down-right
    for i in range(17 ):
        for j in range(17 ):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal, down-left (needs j >= 3 so j-3 stays in range)
    for i in range(17 ):
        for j in range(3 , 20 ):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
    # the grid-scanning solver above is defined under the mangled name ``A``
    print(A())
| 229 | 1 |
"""simple docstring"""
def __magic_name__ ( lowercase ):
    """Return the longest palindromic substring of ``lowercase``.

    Uses Manacher's algorithm: the input is interleaved with ``|`` so even-
    and odd-length palindromes are handled uniformly, then the palindromic
    length around every center is computed, reusing mirrored centers inside
    the furthest-reaching palindrome found so far.

    Args:
        lowercase: the input string.

    Returns:
        The longest contiguous palindromic substring ("" for empty input).
    """
    if not lowercase:
        return ""
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" for range(0, length-1), then the last char
    for ch in lowercase[: len(lowercase ) - 1]:
        new_input_string += ch + "|"
    new_input_string += lowercase[-1]
    # [l, r] = previously found palindromic window reaching furthest right
    l, r = 0, 0  # noqa: E741
    # length[i] = palindrome length centered at i in new_input_string
    length = [1 for _ in range(len(new_input_string ) )]
    start = 0
    for j in range(len(new_input_string ) ):
        # seed the radius from the mirrored center inside [l, r] if possible
        k = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(new_input_string )
            and new_input_string[j + k] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end past the previously explored right edge?
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # track the best center seen so far
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # rebuild the answer from the interleaved string, dropping separators
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for ch in s:
        if ch != "|":
            output_string += ch
    return output_string
if __name__ == "__main__":
    # run any doctest examples embedded in this module's docstrings
    import doctest
    doctest.testmod()
| 173 |
"""simple docstring"""
def __magic_name__ ( denominations , value ):
    """Greedy change-making: represent ``value`` with the fewest coins.

    Args:
        denominations: coin values, assumed sorted ascending — TODO confirm
            against callers (the scan goes largest-first via ``reversed``).
        value: the amount to change; an int or a numeric string.

    Returns:
        The list of coins used, largest first.  Greedy choice is optimal
        only for canonical coin systems (e.g. Indian currency).
    """
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse denominations from largest to smallest
    for denomination in reversed(denominations ):
        # take this coin as many times as it still fits
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append to the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
    # NOTE(review): mangled — every name below was rewritten to
    # ``_UpperCAmelCase``, so each assignment clobbers the previous one and
    # the later reads of ``n``, ``denominations``, ``value`` and ``answer``
    # raise NameError; ``find_minimum_change`` is also undefined (the
    # function above is ``__magic_name__``).  Restore distinct names.
    _UpperCAmelCase = []
    _UpperCAmelCase = """0"""
    if (
        input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
        == "y"
    ):
        _UpperCAmelCase = int(input("""Enter the number of denominations you want to add: """).strip())
        for i in range(0, n):
            denominations.append(int(input(f"""Denomination {i}: """).strip()))
        _UpperCAmelCase = input("""Enter the change you want to make in Indian Currency: """).strip()
    else:
        # All denominations of Indian Currency if user does not enter
        _UpperCAmelCase = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
        _UpperCAmelCase = input("""Enter the change you want to make: """).strip()
    if int(value) == 0 or int(value) < 0:
        print("""The total value cannot be zero or negative.""")
    else:
        print(f"""Following is minimal change for {value}: """)
        _UpperCAmelCase = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=""" """)
| 173 | 1 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# splits code into identifier-like tokens (everything else is a separator).
# NOTE(review): mangled — all three constants are bound to the same name
# ``lowerCamelCase__``; the functions below expect them as NON_ALPHA,
# MIN_NUM_TOKENS and NUM_PERM respectively.
lowerCamelCase__ = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
lowerCamelCase__ = 10
lowerCamelCase__ = 256
def lowerCAmelCase__ ( a__ ) ->Optional[MinHash]:
    """Compute the MinHash of a token list, or ``None`` for short inputs.

    Args:
        a__: the tokens of one document.

    Returns:
        A ``datasketch.MinHash`` built over the distinct tokens, or ``None``
        when fewer than ``MIN_NUM_TOKENS`` tokens are given.
    """
    if len(a__ ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(a__ ):
        min_hash.update(token.encode() )
    return min_hash
def lowerCAmelCase__ ( a__ ) ->Set[str]:
    """Tokenize ``a__`` by splitting on non-identifier characters.

    Returns the set of pieces that are not blank or whitespace-only.
    """
    pieces = NON_ALPHA.split(a__)
    return {piece for piece in pieces if piece.strip()}
class _UpperCAmelCase :
    """Index of near-duplicate documents backed by ``datasketch.MinHashLSH``.

    Documents are added one at a time; each new document is queried against
    the LSH index and attached to the cluster of its first already-clustered
    near-duplicate, so clusters grow greedily in insertion order.
    """

    def __init__( self , *, duplication_jaccard_threshold : float = 0.85 ):
        # Jaccard similarity above which two documents count as duplicates
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm)
        # base document key -> set of keys of its near-duplicates
        self._duplicate_clusters = defaultdict(set)

    def add( self , code_key , min_hash ) -> None:
        """Insert ``code_key`` with its MinHash and cluster it.

        Queries the LSH index first; when near-duplicates exist, the new key
        joins the cluster of the first one that already anchors a cluster,
        otherwise it is attached to the first near-duplicate found.
        """
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}')
            return
        self._index.insert(code_key , min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters( self ) -> List[List[Dict]]:
        """Return clusters as lists of ``{"base_index", "repo_name", "path"}`` dicts."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dicts
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save( self , filepath ) -> None:
        """Write the duplicate clusters to ``filepath`` as JSON."""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , "w") as f:
            json.dump(duplicate_clusters , f)
def lowerCAmelCase__ ( a__ ) ->Optional[Any]:
    """Pool worker: MinHash one ``(index, row)`` pair.

    Args:
        a__: tuple ``(index, data)`` where ``data`` is a row with
            "content", "repo_name" and "path" keys.

    Returns:
        ``((index, repo_name, path), min_hash)`` or ``None`` (implicitly)
        when the document is too short to hash.
    """
    index, data = a__
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def lowerCAmelCase__ ( a__ ) ->Optional[int]:
    """Yield MinHash results for every element of iterator ``a__``.

    Fans the per-document hashing out over a multiprocessing pool, fed
    through a ThreadedIterator so the producer stays ahead of the workers;
    ``None`` results (documents too short to hash) are dropped.

    NOTE(review): ``_compute_min_hash`` is undefined in this mangled file —
    the worker above is named ``lowerCAmelCase__``; restore real names.
    """
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(a__ , max_queue_size=10_000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def lowerCAmelCase__ ( dataset_iterator , jaccard_threshold ) ->List[List[Dict]]:
    """Build duplicate clusters over every row of ``dataset_iterator``.

    Args:
        dataset_iterator: iterable of rows with "content", "repo_name" and
            "path" fields.
        jaccard_threshold: similarity threshold for the LSH index.

    Returns:
        List of clusters, each a list of
        ``{"base_index", "repo_name", "path"}`` dicts.
    """
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def lowerCAmelCase__ ( code_a , code_b ) ->float:
    """Jaccard similarity between the token sets of two code strings.

    Raises:
        ZeroDivisionError: if both strings tokenize to the empty set.
    """
    tokens_a = get_tokens(code_a )
    tokens_b = get_tokens(code_b )
    return len(tokens_a & tokens_b ) / len(tokens_a | tokens_b )
# dataset handle shared with forked multiprocessing workers.
# NOTE(review): mangled name — the workers read ``_shared_dataset`` (set
# via ``global`` in the find-extremes function below); this binding should
# carry that name.
lowerCamelCase__ = None
def lowerCAmelCase__ ( cluster , jaccard_threshold ) ->Optional[Any]:
    """Reduce one duplicate cluster to its "extreme" representatives.

    Keeps an element only when it is not a near-duplicate
    (Jaccard >= ``jaccard_threshold``) of an already-kept extreme; matched
    extremes get their ``copies`` count incremented instead.  Reads the
    module-global ``_shared_dataset`` published by the pool parent.

    Args:
        cluster: list of ``{"base_index": ...}`` dicts (mutated in place).
        jaccard_threshold: similarity above which elements are merged.

    Returns:
        The list of kept extreme elements.
    """
    extremes = []
    for element_a in cluster:
        code_a = _shared_dataset[element_a["base_index"]]["content"]
        for element_b in extremes:
            code_b = _shared_dataset[element_b["base_index"]]["content"]
            if jaccard_similarity(code_a , code_b ) >= jaccard_threshold:
                element_b["copies"] += 1
                break
        else:
            element_a["copies"] = 1
            extremes.append(element_a )
    return extremes
def lowerCAmelCase__ ( cluster_list , dataset , jaccard_threshold ) ->List[List[Dict]]:
    """Map the per-cluster extremes reduction over all clusters in parallel.

    The dataset is published through the module-global ``_shared_dataset``
    so forked workers can read it without pickling it per task.

    Args:
        cluster_list: list of duplicate clusters.
        dataset: the dataset the cluster indices point into.
        jaccard_threshold: similarity threshold forwarded to the workers.

    Returns:
        One extremes list per cluster (unordered, ``imap_unordered``).
    """
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    reduce_cluster = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                reduce_cluster , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def lowerCAmelCase__ ( dataset , jaccard_threshold = 0.85 ) ->Tuple[Type[Dataset], List[List[Dict]]]:
    """Deduplicate ``dataset`` via MinHash clustering.

    Builds duplicate clusters, keeps only each cluster's "extreme"
    representatives, filters everything else out of the dataset, and
    annotates cluster elements with ``is_extreme`` / ``copies``.

    Args:
        dataset: a ``datasets.Dataset`` with a "content" field per row.
        jaccard_threshold: similarity above which rows count as duplicates.

    Returns:
        ``(filtered_dataset, duplicate_clusters)``.
    """
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    # duplicates that are not kept extremes get dropped from the dataset
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx: idx not in remove_indices , with_indices=True )
    # update duplicate_clusters with bookkeeping about what was kept
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f'Original dataset size: {len(dataset )}' )
    print(f'Number of duplicate clusters: {len(duplicate_clusters )}' )
    print(f'Files in duplicate cluster: {len(duplicate_indices )}' )
    print(f'Unique files in duplicate cluster: {len(extreme_dict )}' )
    print(f'Filtered dataset size: {len(ds_filter )}' )
    return ds_filter, duplicate_clusters
| 63 | import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
lowerCamelCase__ = logging.get_logger(__name__)
# NOTE(review): mangled — every constant below is bound to the same name
# ``lowerCamelCase__`` (each assignment clobbers the last), and the two
# dict comprehensions at the end read ``_model_names``, which is never
# defined here; the tokenizer class below likewise reads
# VOCAB_FILES_NAMES / PRETRAINED_* names that no longer exist.
lowerCamelCase__ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# size suffixes of the released funnel-transformer checkpoints
lowerCamelCase__ = [
    '''small''',
    '''small-base''',
    '''medium''',
    '''medium-base''',
    '''intermediate''',
    '''intermediate-base''',
    '''large''',
    '''large-base''',
    '''xlarge''',
    '''xlarge-base''',
]
# vocab and tokenizer file URLs per released checkpoint
lowerCamelCase__ = {
    '''vocab_file''': {
        '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
        '''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
        '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
        '''funnel-transformer/medium-base''': (
            '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
        ),
        '''funnel-transformer/intermediate''': (
            '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
        ),
        '''funnel-transformer/intermediate-base''': (
            '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
        ),
        '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
        '''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
        '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
        '''funnel-transformer/xlarge-base''': (
            '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
        '''funnel-transformer/small-base''': (
            '''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
        ),
        '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
        '''funnel-transformer/medium-base''': (
            '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
        ),
        '''funnel-transformer/intermediate''': (
            '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
        ),
        '''funnel-transformer/intermediate-base''': (
            '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
        ),
        '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
        '''funnel-transformer/large-base''': (
            '''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
        ),
        '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
        '''funnel-transformer/xlarge-base''': (
            '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
        ),
    },
}
# max positions and per-checkpoint init kwargs, keyed by full model id
lowerCamelCase__ = {F"funnel-transformer/{name}": 512 for name in _model_names}
lowerCamelCase__ = {F"funnel-transformer/{name}": {'''do_lower_case''': True} for name in _model_names}
class _UpperCAmelCase ( lowerCAmelCase ):
    """Fast (tokenizers-backed) tokenizer for Funnel Transformer.

    NOTE(review): heavily mangled —
      * every class attribute is bound to the same name ``__A`` so only the
        last one (``2``) survives;
      * ``__init__`` repeats the parameter name ``lowercase_`` (a
        SyntaxError) and binds its results to the local ``_UpperCamelCase``
        instead of ``normalizer_state`` / the backend normalizer /
        ``self.do_lower_case``, so nothing is actually stored;
      * all three methods share the name ``__UpperCAmelCase`` so only the
        last (the vocabulary-saving one) survives.
    Restore distinct names before use.
    """
    __A = VOCAB_FILES_NAMES
    __A = PRETRAINED_VOCAB_FILES_MAP
    __A = PRETRAINED_INIT_CONFIGURATION
    __A = FunnelTokenizer
    __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __A = 2
    def __init__( self : Tuple , lowercase_ : Any=None , lowercase_ : List[Any]=None , lowercase_ : List[str]=True , lowercase_ : List[str]="<unk>" , lowercase_ : List[Any]="<sep>" , lowercase_ : int="<pad>" , lowercase_ : Dict="<cls>" , lowercase_ : int="<mask>" , lowercase_ : Any="<s>" , lowercase_ : Tuple="</s>" , lowercase_ : List[str]=True , lowercase_ : Any=True , lowercase_ : str=None , lowercase_ : Dict="##" , **lowercase_ : Optional[int] , ) -> Dict:
        """Build the fast tokenizer and re-sync the backend normalizer options."""
        super().__init__(
            lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , clean_text=lowercase_ , tokenize_chinese_chars=lowercase_ , strip_accents=lowercase_ , wordpieces_prefix=lowercase_ , **lowercase_ , )
        # rebuild the backend normalizer if its options drifted from ours
        _UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase" , lowercase_) != do_lower_case
            or normalizer_state.get("strip_accents" , lowercase_) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , lowercase_) != tokenize_chinese_chars
        ):
            _UpperCamelCase = getattr(lowercase_ , normalizer_state.pop("type"))
            _UpperCamelCase = do_lower_case
            _UpperCamelCase = strip_accents
            _UpperCamelCase = tokenize_chinese_chars
            _UpperCamelCase = normalizer_class(**lowercase_)
        _UpperCamelCase = do_lower_case
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any]=None) -> str:
        """Wrap one or two sequences with [CLS]/[SEP] special token ids."""
        _UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output
    def __UpperCAmelCase ( self : str , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
        """Build token type ids: the CLS slot gets ``cls_token_type_id``,
        the first sequence 0s, the second (if any) 1s."""
        _UpperCamelCase = [self.sep_token_id]
        _UpperCamelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_a + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
    def __UpperCAmelCase ( self : Dict , lowercase_ : str , lowercase_ : Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's vocabulary files into a directory."""
        _UpperCamelCase = self._tokenizer.model.save(lowercase_ , name=lowercase_)
        return tuple(lowercase_)
| 63 | 1 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case :
    """Builds BioGpt configs and synthetic inputs for the test classes below.

    NOTE(review): machine-mangled — ``__init__`` binds every hyper-parameter
    to the local ``UpperCAmelCase_`` instead of ``self.*``, every check
    helper is named ``lowerCamelCase`` (later defs shadow earlier ones),
    and most signatures repeat the parameter name ``_snake_case``, which is
    a SyntaxError.  Restore the original distinct names before running.
    """
    def __init__( self : Dict , _snake_case : Optional[int] , _snake_case : Dict=13 , _snake_case : Optional[Any]=7 , _snake_case : List[str]=True , _snake_case : Optional[int]=True , _snake_case : Dict=False , _snake_case : Tuple=True , _snake_case : List[Any]=99 , _snake_case : int=32 , _snake_case : int=5 , _snake_case : Optional[Any]=4 , _snake_case : int=37 , _snake_case : Tuple="gelu" , _snake_case : Optional[int]=0.1 , _snake_case : str=0.1 , _snake_case : str=512 , _snake_case : Union[str, Any]=16 , _snake_case : List[Any]=2 , _snake_case : int=0.0_2 , _snake_case : Dict=3 , _snake_case : List[str]=4 , _snake_case : Optional[Any]=None , ):
        """Record the tester's model/config hyper-parameters."""
        UpperCAmelCase_ = parent
        UpperCAmelCase_ = batch_size
        UpperCAmelCase_ = seq_length
        UpperCAmelCase_ = is_training
        UpperCAmelCase_ = use_input_mask
        UpperCAmelCase_ = use_token_type_ids
        UpperCAmelCase_ = use_labels
        UpperCAmelCase_ = vocab_size
        UpperCAmelCase_ = hidden_size
        UpperCAmelCase_ = num_hidden_layers
        UpperCAmelCase_ = num_attention_heads
        UpperCAmelCase_ = intermediate_size
        UpperCAmelCase_ = hidden_act
        UpperCAmelCase_ = hidden_dropout_prob
        UpperCAmelCase_ = attention_probs_dropout_prob
        UpperCAmelCase_ = max_position_embeddings
        UpperCAmelCase_ = type_vocab_size
        UpperCAmelCase_ = type_sequence_label_size
        UpperCAmelCase_ = initializer_range
        UpperCAmelCase_ = num_labels
        UpperCAmelCase_ = num_choices
        UpperCAmelCase_ = scope
    def lowerCamelCase ( self : List[Any]):
        """Create random ids/masks/labels plus a config for one test run."""
        UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        UpperCAmelCase_ = None
        if self.use_input_mask:
            UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length])
        UpperCAmelCase_ = None
        if self.use_token_type_ids:
            UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        UpperCAmelCase_ = None
        UpperCAmelCase_ = None
        UpperCAmelCase_ = None
        if self.use_labels:
            UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices)
        UpperCAmelCase_ = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def lowerCamelCase ( self : List[str]):
        """Build a BioGptConfig from the stored hyper-parameters."""
        return BioGptConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , )
    def lowerCamelCase ( self : Any , _snake_case : Optional[int] , _snake_case : int , _snake_case : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : int , _snake_case : Optional[Any]):
        """Run BioGptModel forward and check the hidden-state shape."""
        UpperCAmelCase_ = BioGptModel(config=_snake_case)
        model.to(_snake_case)
        model.eval()
        UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case)
        UpperCAmelCase_ = model(_snake_case)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def lowerCamelCase ( self : Tuple , _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : List[Any] , _snake_case : Tuple , _snake_case : Dict , _snake_case : Optional[Any] , _snake_case : Any , _snake_case : List[str] , _snake_case : Tuple , ):
        """Run BioGptForCausalLM with labels and check the logits shape."""
        UpperCAmelCase_ = BioGptForCausalLM(config=_snake_case)
        model.to(_snake_case)
        model.eval()
        UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def lowerCamelCase ( self : Dict , _snake_case : str , _snake_case : Optional[Any] , _snake_case : Optional[Any] , _snake_case : Optional[Any] , _snake_case : str , *_snake_case : Any):
        """Check past_key_values caching matches a full forward pass
        when the attention mask hides a mutated token."""
        UpperCAmelCase_ = BioGptModel(config=_snake_case)
        model.to(_snake_case)
        model.eval()
        # create attention mask
        UpperCAmelCase_ = torch.ones(input_ids.shape , dtype=torch.long , device=_snake_case)
        UpperCAmelCase_ = self.seq_length // 2
        UpperCAmelCase_ = 0
        # first forward pass
        UpperCAmelCase_ , UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        UpperCAmelCase_ = ids_tensor((self.batch_size, 1) , config.vocab_size)
        # change a random masked slice from input_ids
        UpperCAmelCase_ = ids_tensor((1,) , _snake_case).item() + 1
        UpperCAmelCase_ = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
        UpperCAmelCase_ = random_other_next_tokens
        # append to next input_ids and attn_mask
        UpperCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1)
        UpperCAmelCase_ = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_snake_case)] , dim=1 , )
        # get two different outputs
        UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case)['''last_hidden_state''']
        UpperCAmelCase_ = model(_snake_case , past_key_values=_snake_case , attention_mask=_snake_case)['''last_hidden_state''']
        # select random slice
        UpperCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
        UpperCAmelCase_ = output_from_no_past[:, -1, random_slice_idx].detach()
        UpperCAmelCase_ = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1e-3))
    def lowerCamelCase ( self : Optional[int] , _snake_case : List[str] , _snake_case : List[str] , _snake_case : List[str] , _snake_case : List[str] , _snake_case : int , *_snake_case : Union[str, Any]):
        """Check cached generation over multiple appended tokens matches
        the equivalent uncached forward pass."""
        UpperCAmelCase_ = BioGptModel(config=_snake_case).to(_snake_case).eval()
        UpperCAmelCase_ = torch.ones(input_ids.shape , dtype=torch.long , device=_snake_case)
        # first forward pass
        UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , use_cache=_snake_case)
        UpperCAmelCase_ , UpperCAmelCase_ = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size)
        UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , 2)
        # append to next input_ids and
        UpperCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1)
        UpperCAmelCase_ = torch.cat([attention_mask, next_attn_mask] , dim=-1)
        UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case)['''last_hidden_state''']
        UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , past_key_values=_snake_case)[
            '''last_hidden_state'''
        ]
        # select random slice
        UpperCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
        UpperCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
        UpperCAmelCase_ = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1e-3))
    def lowerCamelCase ( self : Optional[int] , _snake_case : Optional[int] , _snake_case : str , _snake_case : Optional[Any] , _snake_case : str , _snake_case : Any , *_snake_case : Dict , _snake_case : Optional[Any]=False):
        """Run a forward+backward pass (optionally gradient-checkpointed)."""
        UpperCAmelCase_ = BioGptForCausalLM(_snake_case)
        model.to(_snake_case)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        UpperCAmelCase_ = model(_snake_case , labels=_snake_case)
        self.parent.assertEqual(result.loss.shape , ())
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()
    def lowerCamelCase ( self : Tuple , _snake_case : List[Any] , *_snake_case : int):
        """Check c_proj weight init statistics against the configured std."""
        UpperCAmelCase_ = BioGptModel(_snake_case)
        UpperCAmelCase_ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.0_0_1)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.0_1)
    def lowerCamelCase ( self : Tuple , _snake_case : Dict , _snake_case : Optional[int] , _snake_case : List[str] , _snake_case : Any , _snake_case : int , *_snake_case : List[str]):
        """Run BioGptForTokenClassification and check the logits shape."""
        UpperCAmelCase_ = self.num_labels
        UpperCAmelCase_ = BioGptForTokenClassification(_snake_case)
        model.to(_snake_case)
        model.eval()
        UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def lowerCamelCase ( self : Optional[int]):
        """Split prepare_config_and_inputs into (config, inputs_dict)."""
        UpperCAmelCase_ = self.prepare_config_and_inputs()
        (
            (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) , (
                UpperCAmelCase_
            ) ,
        ) = config_and_inputs
        UpperCAmelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class __snake_case ( a , a , a , unittest.TestCase ):
    """BioGpt model/pipeline test suite.

    NOTE(review): mangled — the base classes are all ``a`` (presumably
    ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin — confirm
    against upstream), every class attribute is bound to the same name
    ``UpperCAmelCase__``, every test method is named ``lowerCamelCase``
    (later defs shadow earlier ones), and method bodies bind results to the
    local ``UpperCAmelCase_`` repeatedly.  Restore distinct names.
    """
    UpperCAmelCase__ : Optional[Any] = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    UpperCAmelCase__ : str = (BioGptForCausalLM,) if is_torch_available() else ()
    UpperCAmelCase__ : Any = (
        {
            '''feature-extraction''': BioGptModel,
            '''text-classification''': BioGptForSequenceClassification,
            '''text-generation''': BioGptForCausalLM,
            '''token-classification''': BioGptForTokenClassification,
            '''zero-shot''': BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    UpperCAmelCase__ : Optional[Any] = False
    def lowerCamelCase ( self : Tuple):
        """Set up the model tester and config tester."""
        UpperCAmelCase_ = BioGptModelTester(self)
        UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , hidden_size=37)
    def lowerCamelCase ( self : Tuple):
        """Run the common config tests."""
        self.config_tester.run_common_tests()
    def lowerCamelCase ( self : Optional[Any]):
        """Exercise the plain model forward check."""
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_snake_case)
    def lowerCamelCase ( self : Optional[Any]):
        """Exercise the model with each position-embedding type."""
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            UpperCAmelCase_ = type
            self.model_tester.create_and_check_model(*_snake_case)
    def lowerCamelCase ( self : Dict):
        """Exercise the attention-mask/past consistency check."""
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_snake_case)
    def lowerCamelCase ( self : int):
        """Exercise forward+backward with gradient checkpointing."""
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*_snake_case , gradient_checkpointing=_snake_case)
    def lowerCamelCase ( self : Tuple):
        """Exercise the past-key-values large-input check."""
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_snake_case)
    def lowerCamelCase ( self : Union[str, Any]):
        """Exercise the weight-initialization statistics check."""
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*_snake_case)
    def lowerCamelCase ( self : Dict):
        """Exercise the token-classification head check."""
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*_snake_case)
    @slow
    def lowerCamelCase ( self : str):
        """Batched left-padded generation must match unpadded generation."""
        UpperCAmelCase_ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
        model.to(_snake_case)
        UpperCAmelCase_ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
        UpperCAmelCase_ = '''left'''
        # Define PAD Token = EOS Token = 50256
        UpperCAmelCase_ = tokenizer.eos_token
        UpperCAmelCase_ = model.config.eos_token_id
        # use different length sentences to test batching
        UpperCAmelCase_ = [
            '''Hello, my dog is a little''',
            '''Today, I''',
        ]
        UpperCAmelCase_ = tokenizer(_snake_case , return_tensors='''pt''' , padding=_snake_case)
        UpperCAmelCase_ = inputs['''input_ids'''].to(_snake_case)
        UpperCAmelCase_ = model.generate(
            input_ids=_snake_case , attention_mask=inputs['''attention_mask'''].to(_snake_case) , )
        UpperCAmelCase_ = tokenizer(sentences[0] , return_tensors='''pt''').input_ids.to(_snake_case)
        UpperCAmelCase_ = model.generate(input_ids=_snake_case)
        UpperCAmelCase_ = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
        UpperCAmelCase_ = tokenizer(sentences[1] , return_tensors='''pt''').input_ids.to(_snake_case)
        UpperCAmelCase_ = model.generate(input_ids=_snake_case , max_length=model.config.max_length - num_paddings)
        UpperCAmelCase_ = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case)
        UpperCAmelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_snake_case)
        UpperCAmelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=_snake_case)
        UpperCAmelCase_ = [
            '''Hello, my dog is a little bit bigger than a little bit.''',
            '''Today, I have a good idea of how to use the information''',
        ]
        self.assertListEqual(_snake_case , _snake_case)
        self.assertListEqual(_snake_case , [non_padded_sentence, padded_sentence])
    @slow
    def lowerCamelCase ( self : Optional[int]):
        """Smoke-test loading pretrained checkpoints from the archive list."""
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ = BioGptModel.from_pretrained(_snake_case)
            self.assertIsNotNone(_snake_case)
    def lowerCamelCase ( self : Any):
        """Sequence classification: single-label logits shape check."""
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_ = 3
        UpperCAmelCase_ = input_dict['''input_ids''']
        UpperCAmelCase_ = input_ids.ne(1).to(_snake_case)
        UpperCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
        UpperCAmelCase_ = BioGptForSequenceClassification(_snake_case)
        model.to(_snake_case)
        model.eval()
        UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , labels=_snake_case)
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
    def lowerCamelCase ( self : Any):
        """Sequence classification: multi-label logits shape check."""
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_ = 3
        UpperCAmelCase_ = '''multi_label_classification'''
        UpperCAmelCase_ = input_dict['''input_ids''']
        UpperCAmelCase_ = input_ids.ne(1).to(_snake_case)
        UpperCAmelCase_ = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
        UpperCAmelCase_ = BioGptForSequenceClassification(_snake_case)
        model.to(_snake_case)
        model.eval()
        UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , labels=_snake_case)
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class __snake_case(unittest.TestCase):
    """Slow integration tests against the public ``microsoft/biogpt`` checkpoint.

    NOTE(review): both methods previously shared the obfuscated name
    ``lowerCamelCase`` (the second silently shadowed the first) and their
    locals were destroyed; names restored so unittest discovers both tests.
    ``torch_device`` is assumed imported at the top of this file — confirm.
    """

    @slow
    def test_inference_lm_head_model(self):
        """Logit shape and a 3x3 slice must match recorded reference values."""
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]
        vocab_size = 42384
        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        """Beam-search generation from a fixed seed must reproduce the reference text."""
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, expected_output_str)
| 51 |
"""Lazy import structure for the XGLM model family (config, tokenizers, PT/Flax/TF models)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import structure: submodule name -> public names it provides.
# BUG FIX: every partial structure was previously assigned to the same
# throwaway name (each assignment overwriting the last) and the final
# _LazyModule call referenced an undefined `_import_structure`, raising
# NameError on import. The canonical incremental-dict pattern is restored.
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; runtime stays lazy.
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 85 | 0 |
"""Filesystem utilities: register compression and Hub filesystems with fsspec."""
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem

# True when the optional `s3fs` package is installed.
# BUG FIX: this flag was previously bound to a throwaway name while the
# `if` below tested the undefined `_has_safs`, raising NameError at import.
_has_safs = importlib.util.find_spec("s3fs") is not None

if _has_safs:
    from .safilesystem import SaFileSystem  # noqa: F401

# BUG FIX: the registration loop below iterates COMPRESSION_FILESYSTEMS,
# which was never bound under that name.
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.BzaFileSystem,
    compression.GzipFileSystem,
    compression.LzaFileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems (clobbering any pre-registered protocol).
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
    """Strip a protocol prefix (e.g. ``s3://``) from a dataset path.

    Returns the path unchanged when it carries no ``://`` scheme.

    BUG FIX: the split result was previously assigned to a discarded local,
    so the function always returned the original URI.
    """
    if "://" in __SCREAMING_SNAKE_CASE:
        __SCREAMING_SNAKE_CASE = __SCREAMING_SNAKE_CASE.split("://")[1]
    return __SCREAMING_SNAKE_CASE
def snake_case_ ( __SCREAMING_SNAKE_CASE ):
    """Return True when the given fsspec filesystem is non-local (remote).

    A ``None`` filesystem or the local ``"file"`` protocol counts as local.
    (The ``fsspec.AbstractFileSystem`` annotation was dropped: it evaluated
    at definition time and only duck-typed ``.protocol`` access is needed.)
    """
    return __SCREAMING_SNAKE_CASE is not None and __SCREAMING_SNAKE_CASE.protocol != "file"
def snake_case_ ( fs: fsspec.AbstractFileSystem, src: str, dst: str ):
    """Move ``src`` to ``dst`` on filesystem ``fs``.

    BUG FIX: all three parameters were previously named
    ``__SCREAMING_SNAKE_CASE`` — a duplicate-argument SyntaxError; names
    restored from how the body uses them.
    """
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def snake_case_ ( ):
    """Reset fsspec's asyncio machinery (needed e.g. after forking).

    BUG FIX: in the fallback branch the loop/thread/lock resets were bound
    to discarded local names, leaving fsspec's global state untouched; they
    now write back into ``fsspec.asyn`` as intended.
    """
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 264 |
'''simple docstring'''
import qiskit
def snake_case_ ( qubits: int, classical_bits: int ):
    """Measure qubit 0 into classical bit 0 on the Aer simulator (1000 shots).

    Returns the counts histogram of the experiment.

    BUG FIX: both parameters shared the name ``__SCREAMING_SNAKE_CASE``
    (duplicate-argument SyntaxError) and intermediate results were bound to
    discarded locals; the circuit/backend/job wiring is restored.
    """
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
    # Demo run: one qubit / one classical bit; prints the shot-count histogram.
    # NOTE(review): `single_qubit_measure` is the helper's original name — the
    # def above was renamed to `snake_case_`; confirm which name is intended.
    print(f"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 264 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
# Emit INFO-level logs from the transformers utilities used below.
logging.set_verbosity_info()
# Module-level logger (transformers-style) for this conversion script.
lowerCamelCase_ : str = logging.get_logger(__name__)
def _A ( config, base_model=False ):
    """Build the (timm key, HF key) rename pairs for a ViT-hybrid checkpoint.

    BUG FIX: both parameters were named ``lowercase`` (duplicate-argument
    SyntaxError) and the accumulator list was bound to a discarded local
    while the body appended to ``rename_keys``; both are restored.
    """
    rename_keys = []
    # fmt: off
    # stem:
    rename_keys.append(("cls_token", "vit.embeddings.cls_token"))
    rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings"))
    rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"))
    # backbone
    rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias"))
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight'''))
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight'''))
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias'''))
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight'''))
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight'''))
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias'''))
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight'''))
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight'''))
            rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias'''))
        rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight'''))
        rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight'''))
        rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias'''))
    # transformer encoder
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight'''))
        rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias'''))
        rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight'''))
        rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias'''))
        rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight'''))
        rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias'''))
        rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight'''))
        rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias'''))
        rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight'''))
        rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias'''))
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ])
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ])
    # fmt: on
    return rename_keys
def _A ( state_dict, config, base_model=False ):
    """Split timm's fused qkv projection into separate query/key/value entries.

    BUG FIX: the three parameters all shared the name ``lowercase``
    (duplicate-argument SyntaxError) and every q/k/v slice was assigned to a
    discarded local instead of being written back into the state dict.
    NOTE(review): target key names restored from the standard ViT conversion
    layout (``{prefix}encoder.layer.{i}.attention.attention.{q,k,v}``) —
    confirm against the modeling file.
    """
    for i in range(config.num_hidden_layers):
        prefix = "" if base_model else "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def _A ( lowercase ):
    """Drop timm's classification-head weights from the state dict, in place.

    BUG FIX: the loop previously popped the wrong object (and referenced the
    undefined name ``state_dict``); each head key is now removed from the
    state dict, silently ignoring absent keys.
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        lowercase.pop(k, None)
def _A ( dct, old, new ):
    """Rename ``dct[old]`` to ``dct[new]``, in place.

    BUG FIX: all three parameters shared the name ``lowercase``
    (duplicate-argument SyntaxError) and the popped value was assigned to a
    discarded local instead of being stored under the new key.
    """
    val = dct.pop(old)
    dct[new] = val
def _A ( ):
    """Download the standard COCO cats test image used to sanity-check conversions.

    BUG FIX: the URL was bound to a discarded local, ``requests.get`` was
    called with the undefined name ``lowercase``, and the undefined ``im``
    was returned; the download/open/return chain is restored.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def _A ( vit_name, pytorch_dump_folder_path, push_to_hub=False ):
    """Convert a timm ViT-hybrid checkpoint to the HuggingFace format.

    Loads the timm model, renames/splits its weights into the HF layout,
    verifies preprocessing and logits agree, then optionally saves/pushes.

    NOTE(review): the parameters all shared the name ``lowercase``
    (duplicate-argument SyntaxError) and most locals were obfuscated away;
    identifiers reconstructed from the visible call structure. The sibling
    helpers are referenced by their original names
    (``create_rename_keys``/``rename_key``/``read_in_q_k_v``/
    ``remove_classification_head_``/``prepare_img``) as the body already did.
    """
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same", layer_type="bottleneck", depths=(3, 4, 9), out_features=["stage3"], embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load the ImageNet-1k label mapping from the Hub
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # create image processor mirroring timm's eval transform
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1E-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1E-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'''Saving processor to {pytorch_dump_folder_path}''')
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f'''Pushing model and processor to the hub {vit_name}''')
        model.push_to_hub(f'''ybelkada/{vit_name}''')
        processor.push_to_hub(f'''ybelkada/{vit_name}''')
if __name__ == "__main__":
    # BUG FIX: the parser and parsed args were bound to throwaway names
    # while the code below used the undefined `parser`/`args`; also removed
    # stray "| 81 |" junk fused onto the final line.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_r50_s16_384",
        type=str,
        help="Name of the hybrid ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
# Module-level logger for this tokenizer module.
a_ : str = logging.get_logger(__name__)

# Filenames expected inside a saved tokenizer directory.
a_ : Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Hub URLs for each pretrained checkpoint's vocab / tokenizer files.
a_ : List[str] = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum sequence length (positional embeddings) per checkpoint.
a_ : Any = {
    "yjernite/retribert-base-uncased": 5_1_2,
}

# Default init kwargs per checkpoint.
a_ : Tuple = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class a ( PreTrainedTokenizerFast ):
    """Fast RetriBERT tokenizer (backed by HuggingFace *tokenizers*); behaves like BertTokenizerFast.

    NOTE(review): the previous revision inherited from the undefined name
    ``_SCREAMING_SNAKE_CASE``, declared every method with duplicate
    ``__magic_name__`` parameters (a SyntaxError), collapsed all class
    attributes and method names into single shadowed identifiers, and
    discarded the normalizer-state updates. Names are restored to the
    conventions the ``PreTrainedTokenizerFast`` base class expects.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer when its saved options disagree
        # with the requested ones.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """Add [CLS]/[SEP] around one sequence (and optionally a second)."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a_pair:
            output += token_ids_a_pair + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_pair=None) -> List[int]:
        """Return segment ids: 0 for the first sequence (+specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_pair + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Serialize the backend tokenizer's vocabulary to *save_directory*."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 168 | 0 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def __a ( __lowerCamelCase, __lowerCamelCase = True, __lowerCamelCase = math.inf, __lowerCamelCase = -math.inf, __lowerCamelCase = math.inf, __lowerCamelCase = -math.inf, __lowerCamelCase = False, __lowerCamelCase = 100, __lowerCamelCase = 0.01, __lowerCamelCase = 1, ):
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : Any = search_prob
UpperCAmelCase_ : Any = start_temperate
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Optional[int] = 0
UpperCAmelCase_ : Optional[int] = None
while not search_end:
UpperCAmelCase_ : int = current_state.score()
if best_state is None or current_score > best_state.score():
UpperCAmelCase_ : Union[str, Any] = current_state
scores.append(__lowerCamelCase )
iterations += 1
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Optional[int] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
UpperCAmelCase_ : List[str] = random.randint(0, len(__lowerCamelCase ) - 1 ) # picking a random neighbor
UpperCAmelCase_ : List[Any] = neighbors.pop(__lowerCamelCase )
UpperCAmelCase_ : Dict = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
UpperCAmelCase_ : Optional[Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
UpperCAmelCase_ : Optional[Any] = picked_neighbor
else:
UpperCAmelCase_ : str = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
UpperCAmelCase_ : str = picked_neighbor
UpperCAmelCase_ : List[str] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
UpperCAmelCase_ : Optional[int] = True
else:
UpperCAmelCase_ : List[str] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(__lowerCamelCase ), __lowerCamelCase )
plt.xlabel("Iterations" )
plt.ylabel("Function values" )
plt.show()
return best_state
if __name__ == "__main__":
    # Demo objectives for the annealer.
    # NOTE(review): both demo defs previously declared duplicate
    # ``__lowerCamelCase`` parameters (a SyntaxError) and the results were
    # bound to throwaway names while the prints read `local_min`; restored
    # the `test_fa` / `prob` / `local_min` names the call sites already use.
    # The `simulated_annealing` call target is the annealer's original name —
    # confirm it matches the def above (obfuscated to `__a`).
    def test_fa(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )

    def test_fa(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"""{local_min.score()}"""
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"""{local_min.score()}"""
    )
| 363 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class A_ (unittest.TestCase):
    """BarkProcessor tests: save/load round-trips, voice presets, tokenizer parity.

    NOTE(review): ``setUp`` previously bound every fixture value to a
    throwaway local instead of ``self.*``, and all methods shared the single
    obfuscated name ``UpperCamelCase__`` (each shadowing the previous), so
    unittest discovered nothing; attribute writes and method names restored.
    """

    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        # NOTE(review): the boolean kwargs were obfuscated; values restored
        # from the standard Bark processor test — confirm against upstream.
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 23 | 0 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class A ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
A__ = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__(self : Any , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : int = 5_0257 , _UpperCAmelCase : int = 1024 , _UpperCAmelCase : int = 768 , _UpperCAmelCase : int = 12 , _UpperCAmelCase : int = 12 , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : str = "gelu_new" , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : float = 1E-5 , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , ) -> List[str]:
"""simple docstring"""
super().__init__()
lowercase__ = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'''
f''' `n_embd`: {n_embd} are not equal.''' )
lowercase__ = prefix_inner_dim
lowercase__ = prefix_hidden_dim
lowercase__ = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
lowercase__ = (
nn.Linear(self.prefix_hidden_dim , _UpperCAmelCase ) if self.prefix_hidden_dim is not None else nn.Identity()
)
lowercase__ = GPTaConfig(
vocab_size=_UpperCAmelCase , n_positions=_UpperCAmelCase , n_embd=_UpperCAmelCase , n_layer=_UpperCAmelCase , n_head=_UpperCAmelCase , n_inner=_UpperCAmelCase , activation_function=_UpperCAmelCase , resid_pdrop=_UpperCAmelCase , embd_pdrop=_UpperCAmelCase , attn_pdrop=_UpperCAmelCase , layer_norm_epsilon=_UpperCAmelCase , initializer_range=_UpperCAmelCase , scale_attn_weights=_UpperCAmelCase , use_cache=_UpperCAmelCase , scale_attn_by_inverse_layer_idx=_UpperCAmelCase , reorder_and_upcast_attn=_UpperCAmelCase , )
lowercase__ = GPTaLMHeadModel(_UpperCAmelCase )
def lowerCamelCase__ (self : Tuple , _UpperCAmelCase : torch.Tensor , _UpperCAmelCase : torch.Tensor , _UpperCAmelCase : Optional[torch.Tensor] = None , _UpperCAmelCase : Optional[torch.Tensor] = None , ) -> str:
"""simple docstring"""
lowercase__ = self.transformer.transformer.wte(_UpperCAmelCase )
lowercase__ = self.encode_prefix(_UpperCAmelCase )
lowercase__ = self.decode_prefix(_UpperCAmelCase )
lowercase__ = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
lowercase__ = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
lowercase__ = torch.cat((dummy_token, input_ids) , dim=1 )
lowercase__ = self.transformer(inputs_embeds=_UpperCAmelCase , labels=_UpperCAmelCase , attention_mask=_UpperCAmelCase )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def lowerCamelCase__ (self : int , _UpperCAmelCase : int , _UpperCAmelCase : torch.device ) -> torch.Tensor:
"""simple docstring"""
return torch.zeros(_UpperCAmelCase , self.prefix_length , dtype=torch.intaa , device=_UpperCAmelCase )
def lowerCamelCase__ (self : Union[str, Any] , _UpperCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
return self.encode_prefix(_UpperCAmelCase )
@torch.no_grad()
def lowerCamelCase__ (self : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ) -> Tuple:
"""simple docstring"""
lowercase__ = torch.split(_UpperCAmelCase , 1 , dim=0 )
lowercase__ = []
lowercase__ = []
for feature in features:
lowercase__ = self.decode_prefix(feature.to(_UpperCAmelCase ) ) # back to the clip feature
# Only support beam search for now
lowercase__ , lowercase__ = self.generate_beam(
input_embeds=_UpperCAmelCase , device=_UpperCAmelCase , eos_token_id=_UpperCAmelCase )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
lowercase__ = torch.stack(_UpperCAmelCase )
lowercase__ = torch.stack(_UpperCAmelCase )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def lowerCamelCase__ (self : Union[str, Any] , _UpperCAmelCase : int=None , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : int = 5 , _UpperCAmelCase : int = 67 , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : Optional[int] = None , ) -> List[Any]:
"""simple docstring"""
lowercase__ = eos_token_id
lowercase__ = None
lowercase__ = None
lowercase__ = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase , dtype=torch.int )
lowercase__ = torch.zeros(_UpperCAmelCase , device=_UpperCAmelCase , dtype=torch.bool )
if input_embeds is not None:
lowercase__ = input_embeds
else:
lowercase__ = self.transformer.transformer.wte(_UpperCAmelCase )
for i in range(_UpperCAmelCase ):
lowercase__ = self.transformer(inputs_embeds=_UpperCAmelCase )
lowercase__ = outputs.logits
lowercase__ = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
lowercase__ = logits.softmax(-1 ).log()
if scores is None:
lowercase__ , lowercase__ = logits.topk(_UpperCAmelCase , -1 )
lowercase__ = generated.expand(_UpperCAmelCase , *generated.shape[1:] )
lowercase__ , lowercase__ = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
lowercase__ = next_tokens
else:
lowercase__ = tokens.expand(_UpperCAmelCase , *tokens.shape[1:] )
lowercase__ = torch.cat((tokens, next_tokens) , dim=1 )
else:
lowercase__ = -float(np.inf )
lowercase__ = 0
lowercase__ = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
lowercase__ = scores_sum / seq_lengths[:, None]
lowercase__ , lowercase__ = scores_sum_average.view(-1 ).topk(_UpperCAmelCase , -1 )
lowercase__ = next_tokens // scores_sum.shape[1]
lowercase__ = seq_lengths[next_tokens_source]
lowercase__ = next_tokens % scores_sum.shape[1]
lowercase__ = next_tokens.unsqueeze(1 )
lowercase__ = tokens[next_tokens_source]
lowercase__ = torch.cat((tokens, next_tokens) , dim=1 )
lowercase__ = generated[next_tokens_source]
lowercase__ = scores_sum_average * seq_lengths
lowercase__ = is_stopped[next_tokens_source]
lowercase__ = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
lowercase__ = torch.cat((generated, next_token_embed) , dim=1 )
lowercase__ = is_stopped + next_tokens.eq(_UpperCAmelCase ).squeeze()
if is_stopped.all():
break
lowercase__ = scores / seq_lengths
lowercase__ = scores.argsort(descending=_UpperCAmelCase )
# tokens tensors are already padded to max_seq_length
lowercase__ = [tokens[i] for i in order]
lowercase__ = torch.stack(_UpperCAmelCase , dim=0 )
lowercase__ = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 305 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def UpperCamelCase ( __magic_name__ : Any ) -> Optional[int]:
"""simple docstring"""
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def UpperCamelCase ( __magic_name__ : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = create_tensor(__magic_name__ )
lowercase__ = gather(__magic_name__ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def UpperCamelCase ( __magic_name__ : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ = [state.process_index]
lowercase__ = gather_object(__magic_name__ )
assert len(__magic_name__ ) == state.num_processes, f'''{gathered_obj}, {len(__magic_name__ )} != {state.num_processes}'''
assert gathered_obj == list(range(state.num_processes ) ), f'''{gathered_obj} != {list(range(state.num_processes ) )}'''
def UpperCamelCase ( __magic_name__ : str ) -> Dict:
"""simple docstring"""
lowercase__ = create_tensor(__magic_name__ )
lowercase__ = broadcast(__magic_name__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def UpperCamelCase ( __magic_name__ : str ) -> Dict:
"""simple docstring"""
if state.is_main_process:
lowercase__ = torch.arange(state.num_processes + 1 ).to(state.device )
else:
lowercase__ = torch.arange(state.num_processes ).to(state.device )
lowercase__ = pad_across_processes(__magic_name__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def UpperCamelCase ( __magic_name__ : List[Any] ) -> Optional[int]:
"""simple docstring"""
if state.num_processes != 2:
return
lowercase__ = create_tensor(__magic_name__ )
lowercase__ = reduce(__magic_name__ , """sum""" )
lowercase__ = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(__magic_name__ , __magic_name__ ), f'''{reduced_tensor} != {truth_tensor}'''
def UpperCamelCase ( __magic_name__ : Dict ) -> int:
"""simple docstring"""
if state.num_processes != 2:
return
lowercase__ = create_tensor(__magic_name__ )
lowercase__ = reduce(__magic_name__ , """mean""" )
lowercase__ = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(__magic_name__ , __magic_name__ ), f'''{reduced_tensor} != {truth_tensor}'''
def UpperCamelCase ( __magic_name__ : str ) -> int:
"""simple docstring"""
main()
def UpperCamelCase ( ) -> Optional[int]:
"""simple docstring"""
lowercase__ = PartialState()
state.print(f'''State: {state}''' )
state.print("""testing gather""" )
test_gather(__magic_name__ )
state.print("""testing gather_object""" )
test_gather_object(__magic_name__ )
state.print("""testing broadcast""" )
test_broadcast(__magic_name__ )
state.print("""testing pad_across_processes""" )
test_pad_across_processes(__magic_name__ )
state.print("""testing reduce_sum""" )
test_reduce_sum(__magic_name__ )
state.print("""testing reduce_mean""" )
test_reduce_mean(__magic_name__ )
if __name__ == "__main__":
main()
| 305 | 1 |
'''simple docstring'''
lowerCAmelCase__ : int = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
lowerCAmelCase__ : List[str] = {value: key for key, value in MORSE_CODE_DICT.items()}
def __UpperCamelCase ( _UpperCAmelCase ):
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def __UpperCamelCase ( _UpperCAmelCase ):
return "".join(REVERSE_DICT[char] for char in message.split() )
def __UpperCamelCase ( ):
__UpperCAmelCase : Union[str, Any] = "Morse code here!"
print(_UpperCAmelCase )
__UpperCAmelCase : Dict = encrypt(_UpperCAmelCase )
print(_UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = decrypt(_UpperCAmelCase )
print(_UpperCAmelCase )
if __name__ == "__main__":
main()
| 351 |
'''simple docstring'''
def __UpperCamelCase ( _UpperCAmelCase ):
if p < 2:
raise ValueError("p should not be less than 2!" )
elif p == 2:
return True
__UpperCAmelCase : List[str] = 4
__UpperCAmelCase : int = (1 << p) - 1
for _ in range(p - 2 ):
__UpperCAmelCase : Optional[Any] = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 37 | 0 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
lowerCAmelCase__ = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = '''cpu'''
lowerCAmelCase__ = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
lowerCAmelCase__ = '''path-to-your-trained-model'''
lowerCAmelCase__ = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
lowerCAmelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
lowerCAmelCase__ = pipe.to(device)
# to channels last
lowerCAmelCase__ = pipe.unet.to(memory_format=torch.channels_last)
lowerCAmelCase__ = pipe.vae.to(memory_format=torch.channels_last)
lowerCAmelCase__ = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
lowerCAmelCase__ = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
lowerCAmelCase__ = torch.randn(2, 4, 64, 64)
lowerCAmelCase__ = torch.rand(1) * 999
lowerCAmelCase__ = torch.randn(2, 77, 768)
lowerCAmelCase__ = (sample, timestep, encoder_hidden_status)
try:
lowerCAmelCase__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
lowerCAmelCase__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
lowerCAmelCase__ = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
lowerCAmelCase__ = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
lowerCAmelCase__ = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
lowerCAmelCase__ = 666
lowerCAmelCase__ = torch.Generator(device).manual_seed(seed)
lowerCAmelCase__ = {'''generator''': generator}
if args.steps is not None:
lowerCAmelCase__ = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
lowerCAmelCase__ = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 108 | import csv
import tweepy
# Twitter API credentials
__a : Union[str, Any] = """"""
__a : Union[str, Any] = """"""
__a : Union[str, Any] = """"""
__a : List[Any] = """"""
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
__lowercase = tweepy.OAuthHandler(lowercase , lowercase )
auth.set_access_token(lowercase , lowercase )
__lowercase = tweepy.API(lowercase )
# initialize a list to hold all the tweepy Tweets
__lowercase = []
# make initial request for most recent tweets (200 is the maximum allowed count)
__lowercase = api.user_timeline(screen_name=lowercase , count=200 )
# save most recent tweets
alltweets.extend(lowercase )
# save the id of the oldest tweet less one
__lowercase = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowercase ) > 0:
print(F"getting tweets before {oldest}" )
# all subsequent requests use the max_id param to prevent duplicates
__lowercase = api.user_timeline(
screen_name=lowercase , count=200 , max_id=lowercase )
# save most recent tweets
alltweets.extend(lowercase )
# update the id of the oldest tweet less one
__lowercase = alltweets[-1].id - 1
print(F"...{len(lowercase )} tweets downloaded so far" )
# transform the tweepy tweets into a 2D array that will populate the csv
__lowercase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"new_{screen_name}_tweets.csv" , '''w''' ) as f:
__lowercase = csv.writer(lowercase )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(lowercase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""") | 210 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=__lowerCamelCase )
class a ( __lowerCamelCase ):
__lowerCAmelCase : str = field(default="""image-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
__lowerCAmelCase : ClassVar[Features] = Features({"""image""": Image()} )
__lowerCAmelCase : ClassVar[Features] = Features({"""labels""": ClassLabel} )
__lowerCAmelCase : str = "image"
__lowerCAmelCase : str = "labels"
def __lowerCamelCase ( self :List[Any] ,__lowercase :Optional[int] ):
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] ,__lowercase ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
snake_case__ : int = copy.deepcopy(self )
snake_case__ : str = self.label_schema.copy()
snake_case__ : Optional[int] = features[self.label_column]
snake_case__ : Dict = label_schema
return task_template
@property
def __lowerCamelCase ( self :Optional[int] ):
return {
self.image_column: "image",
self.label_column: "labels",
}
| 44 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
A__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
A__ = 12_8022
A__ = 12_8028
@require_sentencepiece
class a ( __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : Optional[int] = MaMaaaTokenizer
__lowerCAmelCase : Tuple = False
__lowerCAmelCase : Any = False
__lowerCAmelCase : Union[str, Any] = True
def __lowerCamelCase ( self :int ):
super().setUp()
snake_case__ : Tuple = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
snake_case__ : Optional[Any] = dict(zip(__lowercase ,range(len(__lowercase ) ) ) )
snake_case__ : List[Any] = Path(self.tmpdirname )
save_json(__lowercase ,save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__lowercase ,save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
snake_case__ : str = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCamelCase ( self :Optional[int] ,**__lowercase :Optional[int] ):
return MaMaaaTokenizer.from_pretrained(self.tmpdirname ,**__lowercase )
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :Tuple ):
return (
"This is a test",
"This is a test",
)
def __lowerCamelCase ( self :Tuple ):
snake_case__ : Tuple = '''</s>'''
snake_case__ : List[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) ,__lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) ,__lowercase )
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : Dict = self.get_tokenizer()
snake_case__ : Union[str, Any] = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'''</s>''' )
self.assertEqual(vocab_keys[1] ,'''<unk>''' )
self.assertEqual(vocab_keys[-1] ,'''<s>''' )
self.assertEqual(len(__lowercase ) ,tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('''Skip this test while all models are still to be uploaded.''' )
def __lowerCamelCase ( self :List[Any] ):
pass
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : List[Any] = self.get_tokenizer()
snake_case__ : Optional[int] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__lowercase ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowercase ) ,[2, 3, 4, 5, 6] ,)
snake_case__ : str = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(__lowercase ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
snake_case__ : Optional[int] = tokenizer.convert_tokens_to_string(__lowercase )
self.assertEqual(__lowercase ,'''This is a test''' )
@slow
def __lowerCamelCase ( self :Union[str, Any] ):
# fmt: off
snake_case__ : Tuple = {'''input_ids''': [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowercase ,model_name='''facebook/m2m100_418M''' ,revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' ,)
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
__lowerCAmelCase : Union[str, Any] = """facebook/m2m100_418M"""
__lowerCAmelCase : Union[str, Any] = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
__lowerCAmelCase : Optional[Any] = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
__lowerCAmelCase : Dict = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]
@classmethod
def __lowerCamelCase ( cls :Union[str, Any] ):
snake_case__ : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name ,src_lang='''en''' ,tgt_lang='''fr''' )
snake_case__ : Union[str, Any] = 1
return cls
def __lowerCamelCase ( self :Tuple ):
self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) ,1_2_8_0_0_6 )
self.assertEqual(self.tokenizer.get_lang_id('''en''' ) ,1_2_8_0_2_2 )
self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) ,1_2_8_0_7_6 )
self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) ,1_2_8_0_6_3 )
def __lowerCamelCase ( self :Any ):
snake_case__ : Optional[int] = self.tokenizer.get_vocab()
self.assertEqual(len(__lowercase ) ,self.tokenizer.vocab_size )
self.assertEqual(vocab['''<unk>'''] ,3 )
self.assertIn(self.tokenizer.get_lang_token('''en''' ) ,__lowercase )
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : Optional[int] = '''en'''
snake_case__ : Any = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,__lowercase )
def __lowerCamelCase ( self :List[Any] ):
self.assertIn(__lowercase ,self.tokenizer.all_special_ids )
# fmt: off
snake_case__ : int = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
# fmt: on
snake_case__ : Tuple = self.tokenizer.decode(__lowercase ,skip_special_tokens=__lowercase )
snake_case__ : Optional[int] = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=__lowercase )
self.assertEqual(__lowercase ,__lowercase )
self.assertNotIn(self.tokenizer.eos_token ,__lowercase )
def __lowerCamelCase ( self :Any ):
snake_case__ : List[Any] = tempfile.mkdtemp()
snake_case__ : List[Any] = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(__lowercase )
snake_case__ : Any = MaMaaaTokenizer.from_pretrained(__lowercase )
self.assertDictEqual(new_tok.lang_token_to_id ,__lowercase )
@require_torch
def __lowerCamelCase ( self :str ):
snake_case__ : Dict = '''en'''
snake_case__ : List[Any] = '''fr'''
snake_case__ : Union[str, Any] = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=__lowercase ,return_tensors='''pt''' )
snake_case__ : Optional[int] = shift_tokens_right(
batch['''labels'''] ,self.tokenizer.pad_token_id ,self.tokenizer.eos_token_id )
for k in batch:
snake_case__ : Optional[int] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Optional[Any] = '''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
snake_case__ : Any = '''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
@require_torch
def __lowerCamelCase ( self :Tuple ):
snake_case__ : Union[str, Any] = '''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
snake_case__ : List[str] = '''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __lowerCamelCase ( self :Tuple ):
snake_case__ : str = self.tokenizer._build_translation_inputs('''A test''' ,return_tensors='''pt''' ,src_lang='''en''' ,tgt_lang='''ar''' )
self.assertEqual(
nested_simplify(__lowercase ) ,{
# en_XX, A, test, EOS
'''input_ids''': [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 1_2_8_0_0_6,
} ,)
| 44 | 1 |
"""simple docstring"""
def lowercase_ ( __UpperCAmelCase ) -> bool:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = f"""Input value of [number={number}] must be an integer"""
raise TypeError(__UpperCAmelCase )
if number < 0:
return False
lowerCAmelCase__ : int = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 242 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_A = logging.get_logger(__name__)
class _lowerCamelCase :
    """RAG-style tokenizer pair: a question-encoder tokenizer plus a generator tokenizer.

    NOTE(review): identifiers in this block are machine-mangled.  Several
    ``def`` lines repeat the parameter name ``UpperCamelCase`` (a SyntaxError
    in Python), and bodies read names that are never bound after the mangling
    (``question_encoder``, ``generator``, ``save_directory``, ``kwargs``,
    ``config``, ``max_length``, ``tgt_texts``, ``max_target_length``,
    ``model_inputs``, ``labels``).  Code tokens are left unchanged; only
    comments/docstrings were added.  All methods named ``_lowerCAmelCase``
    shadow each other — only the last definition survives on the class.
    """

    def __init__( self : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : int ) -> str:
        """Store both sub-tokenizers and select the question encoder as current."""
        lowerCAmelCase__ : List[Any] = question_encoder
        lowerCAmelCase__ : Optional[int] = generator
        lowerCAmelCase__ : Optional[int] = self.question_encoder

    def _lowerCAmelCase ( self : Dict , UpperCamelCase : Optional[Any] ) -> str:
        """Save both sub-tokenizers into dedicated sub-directories of the target dir."""
        if os.path.isfile(UpperCamelCase ):
            # NOTE(review): `save_directory` in the f-string is unbound here (NameError).
            raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
        os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
        lowerCAmelCase__ : Dict = os.path.join(UpperCamelCase , """question_encoder_tokenizer""" )
        lowerCAmelCase__ : List[Any] = os.path.join(UpperCamelCase , """generator_tokenizer""" )
        self.question_encoder.save_pretrained(UpperCamelCase )
        self.generator.save_pretrained(UpperCamelCase )

    @classmethod
    def _lowerCAmelCase ( cls : Union[str, Any] , UpperCamelCase : List[str] , **UpperCamelCase : List[str] ) -> Dict:
        """Load both sub-tokenizers from a pretrained checkpoint via AutoTokenizer."""
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        lowerCAmelCase__ : Dict = kwargs.pop("""config""" , UpperCamelCase )
        if config is None:
            lowerCAmelCase__ : int = RagConfig.from_pretrained(UpperCamelCase )
        lowerCAmelCase__ : List[str] = AutoTokenizer.from_pretrained(
            UpperCamelCase , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
        lowerCAmelCase__ : List[str] = AutoTokenizer.from_pretrained(
            UpperCamelCase , config=config.generator , subfolder="""generator_tokenizer""" )
        return cls(question_encoder=UpperCamelCase , generator=UpperCamelCase )

    def __call__( self : Dict , *UpperCamelCase : List[Any] , **UpperCamelCase : Union[str, Any] ) -> int:
        """Delegate tokenization to whichever sub-tokenizer is current."""
        return self.current_tokenizer(*UpperCamelCase , **UpperCamelCase )

    def _lowerCAmelCase ( self : Dict , *UpperCamelCase : Tuple , **UpperCamelCase : Optional[int] ) -> Dict:
        """Batch-decode generated ids with the generator tokenizer."""
        return self.generator.batch_decode(*UpperCamelCase , **UpperCamelCase )

    def _lowerCAmelCase ( self : List[Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : List[Any] ) -> str:
        """Decode a single sequence with the generator tokenizer."""
        return self.generator.decode(*UpperCamelCase , **UpperCamelCase )

    def _lowerCAmelCase ( self : Any ) -> Union[str, Any]:
        """Make the question encoder the current tokenizer."""
        lowerCAmelCase__ : Optional[Any] = self.question_encoder

    def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
        """Make the generator the current tokenizer."""
        lowerCAmelCase__ : Optional[int] = self.generator

    def _lowerCAmelCase ( self : List[str] , UpperCamelCase : List[str] , UpperCamelCase : Optional[List[str]] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : str = "longest" , UpperCamelCase : str = None , UpperCamelCase : bool = True , **UpperCamelCase : Union[str, Any] , ) -> BatchEncoding:
        """Deprecated seq2seq helper: tokenize sources and, optionally, targets."""
        warnings.warn(
            """`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
            """regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
            """context manager to prepare your targets. See the documentation of your specific tokenizer for more """
            """details""" , UpperCamelCase , )
        if max_length is None:
            lowerCAmelCase__ : Any = self.current_tokenizer.model_max_length
        lowerCAmelCase__ : Tuple = self(
            UpperCamelCase , add_special_tokens=UpperCamelCase , return_tensors=UpperCamelCase , max_length=UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , **UpperCamelCase , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            lowerCAmelCase__ : Tuple = self.current_tokenizer.model_max_length
        lowerCAmelCase__ : Tuple = self(
            text_target=UpperCamelCase , add_special_tokens=UpperCamelCase , return_tensors=UpperCamelCase , padding=UpperCamelCase , max_length=UpperCamelCase , truncation=UpperCamelCase , **UpperCamelCase , )
        lowerCAmelCase__ : Any = labels["""input_ids"""]
        return model_inputs
| 242 | 1 |
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ )-> str:
    """Return the input string with its whitespace-separated words reversed.

    Fix: the body referenced an unbound name ``input_str`` (NameError at call
    time); it now reads the actual parameter.

    >>> lowerCAmelCase("I am a robot")
    'robot a am I'
    >>> lowerCAmelCase("")
    ''
    """
    return " ".join(SCREAMING_SNAKE_CASE_.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 60 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Union[str, Any] = {
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class __magic_name__ ( snake_case ):
    """BridgeTower vision-tower configuration.

    NOTE(review): machine-mangled — ``__init__`` repeats the parameter name
    ``_lowercase`` (a SyntaxError) and the body reads the original, now
    unbound argument names (``hidden_size`` etc.).  Tokens left unchanged.
    """

    # Hub model-type identifier for this sub-config.
    UpperCamelCase_ :List[Any] = """bridgetower_vision_model"""

    def __init__( self , _lowercase=768 , _lowercase=12 , _lowercase=3 , _lowercase=16 , _lowercase=288 , _lowercase=1 , _lowercase=1e-0_5 , _lowercase=False , _lowercase=True , _lowercase=False , **_lowercase , )-> Optional[Any]:
        """Store the vision-encoder hyper-parameters."""
        super().__init__(**_lowercase )
        UpperCamelCase_ = hidden_size
        UpperCamelCase_ = num_hidden_layers
        UpperCamelCase_ = num_channels
        UpperCamelCase_ = patch_size
        UpperCamelCase_ = image_size
        UpperCamelCase_ = initializer_factor
        UpperCamelCase_ = layer_norm_eps
        UpperCamelCase_ = stop_gradient
        UpperCamelCase_ = share_layernorm
        UpperCamelCase_ = remove_last_layer

    @classmethod
    def UpperCAmelCase_ ( cls , _lowercase , **_lowercase )-> "PretrainedConfig":
        """Load this sub-config, extracting it from a full ``bridgetower`` config."""
        UpperCamelCase_ , UpperCamelCase_ = cls.get_config_dict(_lowercase , **_lowercase )
        if config_dict.get("model_type" ) == "bridgetower":
            # NOTE(review): a *vision* config pulling "text_config" looks
            # copy-pasted from the text class — confirm against upstream.
            UpperCamelCase_ = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(_lowercase , **_lowercase )
class __magic_name__ ( snake_case ):
    """BridgeTower text-tower configuration (RoBERTa-like hyper-parameters).

    NOTE(review): machine-mangled — ``__init__`` repeats the parameter name
    ``_lowercase`` (a SyntaxError) and the body reads the original, now
    unbound argument names.  Tokens left unchanged.
    """

    # Hub model-type identifier for this sub-config.
    UpperCamelCase_ :Optional[int] = """bridgetower_text_model"""

    def __init__( self , _lowercase=50_265 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=1 , _lowercase=3_072 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=514 , _lowercase=1 , _lowercase=1e-0_5 , _lowercase=1 , _lowercase=0 , _lowercase=2 , _lowercase="absolute" , _lowercase=True , **_lowercase , )-> Optional[int]:
        """Store the text-encoder hyper-parameters."""
        super().__init__(**_lowercase )
        UpperCamelCase_ = vocab_size
        UpperCamelCase_ = hidden_size
        UpperCamelCase_ = num_hidden_layers
        UpperCamelCase_ = num_attention_heads
        UpperCamelCase_ = hidden_act
        UpperCamelCase_ = initializer_factor
        UpperCamelCase_ = intermediate_size
        UpperCamelCase_ = hidden_dropout_prob
        UpperCamelCase_ = attention_probs_dropout_prob
        UpperCamelCase_ = max_position_embeddings
        UpperCamelCase_ = type_vocab_size
        UpperCamelCase_ = layer_norm_eps
        UpperCamelCase_ = position_embedding_type
        UpperCamelCase_ = use_cache
        UpperCamelCase_ = pad_token_id
        UpperCamelCase_ = bos_token_id
        UpperCamelCase_ = eos_token_id

    @classmethod
    def UpperCAmelCase_ ( cls , _lowercase , **_lowercase )-> "PretrainedConfig":
        """Load this sub-config, extracting it from a full ``bridgetower`` config."""
        UpperCamelCase_ , UpperCamelCase_ = cls.get_config_dict(_lowercase , **_lowercase )
        if config_dict.get("model_type" ) == "bridgetower":
            UpperCamelCase_ = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(_lowercase , **_lowercase )
class __magic_name__ ( snake_case ):
    """Top-level BridgeTower configuration combining text and vision sub-configs.

    NOTE(review): machine-mangled — duplicate ``_lowercase`` parameters
    (SyntaxError), unbound reads (``kwargs``, ``share_cross_modal_transformer_layers``,
    ``text_config``, ``vision_config`` ...), and references to
    ``BridgeTowerTextConfig``/``BridgeTowerVisionConfig``, names this dump
    never defines (the sub-config classes above were renamed).
    """

    # Hub model-type identifier.
    UpperCamelCase_ :List[Any] = """bridgetower"""

    def __init__( self , _lowercase=True , _lowercase="gelu" , _lowercase=768 , _lowercase=1 , _lowercase=1e-0_5 , _lowercase=False , _lowercase="add" , _lowercase=12 , _lowercase=6 , _lowercase=False , _lowercase=False , _lowercase=None , _lowercase=None , **_lowercase , )-> List[Any]:
        """Store cross-modal hyper-parameters and build the two sub-configs."""
        # TODO: remove this once the Hub files are updated.
        UpperCamelCase_ = kwargs.pop("text_config_dict" , _lowercase )
        UpperCamelCase_ = kwargs.pop("vision_config_dict" , _lowercase )

        super().__init__(**_lowercase )
        UpperCamelCase_ = share_cross_modal_transformer_layers
        UpperCamelCase_ = hidden_act
        UpperCamelCase_ = hidden_size
        UpperCamelCase_ = initializer_factor
        UpperCamelCase_ = layer_norm_eps
        UpperCamelCase_ = share_link_tower_layers
        UpperCamelCase_ = link_tower_type
        UpperCamelCase_ = num_attention_heads
        UpperCamelCase_ = num_hidden_layers
        UpperCamelCase_ = tie_word_embeddings
        UpperCamelCase_ = init_layernorm_from_vision_encoder

        if text_config is None:
            UpperCamelCase_ = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." )

        if vision_config is None:
            UpperCamelCase_ = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." )

        UpperCamelCase_ = BridgeTowerTextConfig(**_lowercase )
        UpperCamelCase_ = BridgeTowerVisionConfig(**_lowercase )

    @classmethod
    def UpperCAmelCase_ ( cls , _lowercase , _lowercase , **_lowercase )-> List[str]:
        """Build a combined config from explicit text and vision sub-configs."""
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_lowercase )

    def UpperCAmelCase_ ( self )-> Union[str, Any]:
        """Serialize to a plain dict, expanding the nested sub-configs."""
        UpperCamelCase_ = copy.deepcopy(self.__dict__ )
        UpperCamelCase_ = self.text_config.to_dict()
        UpperCamelCase_ = self.vision_config.to_dict()
        UpperCamelCase_ = self.__class__.model_type
        return output
| 60 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
    """Fixture holding image-processing settings for the Levit processor tests.

    NOTE(review): machine-mangled — every ``__init__`` parameter is named
    ``A`` (duplicates are a SyntaxError) and the body reads the original,
    unbound argument names (``size``, ``crop_size``, ``parent``, ...).
    """

    def __init__( self : List[str] , A : Any , A : Union[str, Any]=7 , A : str=3 , A : List[Any]=18 , A : Any=30 , A : Optional[int]=4_00 , A : List[str]=True , A : Dict=None , A : Any=True , A : List[str]=None , A : Dict=True , A : Tuple=[0.5, 0.5, 0.5] , A : int=[0.5, 0.5, 0.5] , ) -> Tuple:
        """Record the test settings, defaulting size/crop_size when omitted."""
        _UpperCAmelCase = size if size is not None else {'shortest_edge': 18}
        _UpperCAmelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        _UpperCAmelCase = parent
        _UpperCAmelCase = batch_size
        _UpperCAmelCase = num_channels
        _UpperCAmelCase = image_size
        _UpperCAmelCase = min_resolution
        _UpperCAmelCase = max_resolution
        _UpperCAmelCase = do_resize
        _UpperCAmelCase = size
        _UpperCAmelCase = do_center_crop
        _UpperCAmelCase = crop_size
        _UpperCAmelCase = do_normalize
        _UpperCAmelCase = image_mean
        _UpperCAmelCase = image_std

    def _lowerCamelCase ( self : Optional[Any]) -> Any:
        """Return the settings as the kwargs dict expected by the processor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class __lowerCAmelCase ( a__ , unittest.TestCase ):
    """Tests for ``LevitImageProcessor`` with PIL, numpy and torch inputs.

    NOTE(review): machine-mangled — this re-uses (and therefore shadows) the
    class name ``__lowerCAmelCase`` of the tester above, reads the unbound
    name ``__UpperCAmelCase`` throughout, and references
    ``LevitImageProcessingTester``, which this dump never defines under that
    name.  All methods named ``_lowerCamelCase`` shadow each other.
    """

    # Class under test (None when vision deps are unavailable).
    UpperCamelCase = LevitImageProcessor if is_vision_available() else None

    def _lowerCamelCase ( self : Optional[int]) -> Union[str, Any]:
        """Set up a fresh settings fixture."""
        _UpperCAmelCase = LevitImageProcessingTester(self)

    @property
    def _lowerCamelCase ( self : Tuple) -> Any:
        """kwargs dict used to instantiate the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def _lowerCamelCase ( self : Union[str, Any]) -> Optional[Any]:
        """The processor exposes all expected configuration attributes."""
        _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(__UpperCAmelCase , 'image_mean'))
        self.assertTrue(hasattr(__UpperCAmelCase , 'image_std'))
        self.assertTrue(hasattr(__UpperCAmelCase , 'do_normalize'))
        self.assertTrue(hasattr(__UpperCAmelCase , 'do_resize'))
        self.assertTrue(hasattr(__UpperCAmelCase , 'do_center_crop'))
        self.assertTrue(hasattr(__UpperCAmelCase , 'size'))

    def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
        """``from_dict`` honours explicit size / crop_size overrides."""
        _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {'shortest_edge': 18})
        self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18})

        _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
        self.assertEqual(image_processor.size , {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84})

    def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
        """Intentionally empty placeholder."""
        pass

    def _lowerCamelCase ( self : Any) -> Any:
        """PIL input: single image and batch produce correctly shaped tensors."""
        _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase)
        for image in image_inputs:
            self.assertIsInstance(__UpperCAmelCase , Image.Image)

        # Test not batched input
        _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

        # Test batched
        _UpperCAmelCase = image_processing(__UpperCAmelCase , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def _lowerCamelCase ( self : int) -> Any:
        """numpy input: single image and batch produce correctly shaped tensors."""
        _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase)
        for image in image_inputs:
            self.assertIsInstance(__UpperCAmelCase , np.ndarray)

        # Test not batched input
        _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

        # Test batched
        _UpperCAmelCase = image_processing(__UpperCAmelCase , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def _lowerCamelCase ( self : Dict) -> Tuple:
        """torch input: single image and batch produce correctly shaped tensors."""
        _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase)
        for image in image_inputs:
            self.assertIsInstance(__UpperCAmelCase , torch.Tensor)

        # Test not batched input
        _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

        # Test batched
        _UpperCAmelCase = image_processing(__UpperCAmelCase , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
| 339 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : Dict = 'unispeech'
def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase=320 , __UpperCAmelCase=2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=100 , __UpperCAmelCase=256 , __UpperCAmelCase=256 , __UpperCAmelCase=0.1 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=80 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=0.5 , **__UpperCAmelCase , ) -> Union[str, Any]:
super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase )
_a = hidden_size
_a = feat_extract_norm
_a = feat_extract_activation
_a = list(__UpperCAmelCase )
_a = list(__UpperCAmelCase )
_a = list(__UpperCAmelCase )
_a = conv_bias
_a = num_conv_pos_embeddings
_a = num_conv_pos_embedding_groups
_a = len(self.conv_dim )
_a = num_hidden_layers
_a = intermediate_size
_a = hidden_act
_a = num_attention_heads
_a = hidden_dropout
_a = attention_dropout
_a = activation_dropout
_a = feat_proj_dropout
_a = final_dropout
_a = layerdrop
_a = layer_norm_eps
_a = initializer_range
_a = num_ctc_classes
_a = vocab_size
_a = do_stable_layer_norm
_a = use_weighted_layer_sum
_a = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a = apply_spec_augment
_a = mask_time_prob
_a = mask_time_length
_a = mask_time_min_masks
_a = mask_feature_prob
_a = mask_feature_length
_a = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_a = num_codevectors_per_group
_a = num_codevector_groups
_a = contrastive_logits_temperature
_a = feat_quantizer_dropout
_a = num_negatives
_a = codevector_dim
_a = proj_codevector_dim
_a = diversity_loss_weight
# ctc loss
_a = ctc_loss_reduction
_a = ctc_zero_infinity
# pretraining loss
_a = replace_prob
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
return functools.reduce(operator.mul , self.conv_stride , 1 ) | 320 | 0 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__magic_name__: List[str] = logging.getLogger(__name__)
@dataclass(frozen=_lowerCAmelCase )
class snake_case__ :
    """Single HANS input example (guid, two sentences, label, pair id).

    NOTE(review): mangling renamed every field to ``lowercase__`` — in a
    class body later annotations override earlier ones, so only one field
    survives on the dataclass.
    """

    lowercase__ : str
    lowercase__ : str
    lowercase__ : Optional[str] = None
    lowercase__ : Optional[str] = None
    lowercase__ : Optional[str] = None
@dataclass(frozen=_lowerCAmelCase )
class snake_case__ :
    """Tokenized HANS features (ids, masks, token types, label, pair id).

    NOTE(review): mangling renamed every field to ``lowercase__`` — later
    annotations override earlier ones, so only one field survives.
    """

    lowercase__ : List[int]
    lowercase__ : Optional[List[int]] = None
    lowercase__ : Optional[List[int]] = None
    lowercase__ : Optional[Union[int, float]] = None
    lowercase__ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class snake_case__ ( _lowerCAmelCase ):
    """PyTorch ``Dataset`` of cached HANS features.

    NOTE(review): machine-mangled — ``__init__`` repeats the parameter name
    ``lowerCAmelCase__`` (SyntaxError), the body reads unbound names
    (``task``, ``tokenizer``, ``data_dir``, ``evaluate``, ``processor``,
    ``label_list``, ``cached_features_file``, ``overwrite_cache``), and it
    calls ``hans_convert_examples_to_features``, which this dump defines
    under a different (mangled) name.
    """

    # Cached, tokenized examples.
    lowercase__ : List[InputFeatures]

    def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__=False , lowerCAmelCase__ = False , ) -> Dict:
        """Load features from the on-disk cache or build and cache them."""
        __magic_name__ : Dict = hans_processors[task]()
        __magic_name__ : Dict = os.path.join(
            lowerCAmelCase__ , """cached_{}_{}_{}_{}""".format(
                """dev""" if evaluate else """train""" , tokenizer.__class__.__name__ , str(lowerCAmelCase__ ) , lowerCAmelCase__ , ) , )
        __magic_name__ : List[str] = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            __magic_name__ ,__magic_name__ : int = label_list[2], label_list[1]
        __magic_name__ : Tuple = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        __magic_name__ : Tuple = cached_features_file + """.lock"""
        with FileLock(lowerCAmelCase__ ):
            if os.path.exists(lowerCAmelCase__ ) and not overwrite_cache:
                logger.info(F'Loading features from cached file {cached_features_file}' )
                __magic_name__ : Any = torch.load(lowerCAmelCase__ )
            else:
                logger.info(F'Creating features from dataset file at {data_dir}' )
                __magic_name__ : List[str] = (
                    processor.get_dev_examples(lowerCAmelCase__ ) if evaluate else processor.get_train_examples(lowerCAmelCase__ )
                )
                logger.info("""Training examples: %s""" , len(lowerCAmelCase__ ) )
                __magic_name__ : Dict = hans_convert_examples_to_features(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
                logger.info("""Saving features into cached file %s""" , lowerCAmelCase__ )
                torch.save(self.features , lowerCAmelCase__ )

    def __len__( self ) -> Optional[Any]:
        """Number of cached features."""
        return len(self.features )

    def __getitem__( self , lowerCAmelCase__ ) -> InputFeatures:
        # NOTE(review): indexes with unbound `i` instead of the parameter.
        return self.features[i]

    def __magic_name__ ( self ) -> Optional[int]:
        """Return the label list recorded at construction time."""
        return self.label_list
if is_tf_available():
import tensorflow as tf
class snake_case__ :
    """TensorFlow-backed HANS dataset exposed via ``tf.data.Dataset.from_generator``.

    NOTE(review): machine-mangled like the torch variant — duplicate
    ``lowerCAmelCase__`` parameters (SyntaxError) and unbound reads
    (``task``, ``tokenizer``, ``evaluate``, ``processor``, ``label_list``,
    ``examples``); ``self.features``/``self.dataset`` are read but never
    assigned under those names.
    """

    # Tokenized examples backing the tf.data pipeline.
    lowercase__ : List[InputFeatures]

    def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 1_28 , lowerCAmelCase__=False , lowerCAmelCase__ = False , ) -> str:
        """Tokenize the requested split and wrap it in a tf.data generator dataset."""
        __magic_name__ : Optional[Any] = hans_processors[task]()
        __magic_name__ : Dict = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            __magic_name__ ,__magic_name__ : Any = label_list[2], label_list[1]
        __magic_name__ : List[Any] = label_list
        __magic_name__ : str = processor.get_dev_examples(lowerCAmelCase__ ) if evaluate else processor.get_train_examples(lowerCAmelCase__ )
        __magic_name__ : Dict = hans_convert_examples_to_features(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )

        def gen():
            # Stream one (inputs-dict, label) pair at a time for from_generator.
            for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="""convert examples to features""" ):
                if ex_index % 1_00_00 == 0:
                    logger.info("""Writing example %d of %d""" % (ex_index, len(lowerCAmelCase__ )) )

                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )

        __magic_name__ : Dict = tf.data.Dataset.from_generator(
            lowerCAmelCase__ , (
                {
                    """example_id""": tf.intaa,
                    """input_ids""": tf.intaa,
                    """attention_mask""": tf.intaa,
                    """token_type_ids""": tf.intaa,
                },
                tf.intaa,
            ) , (
                {
                    """example_id""": tf.TensorShape([] ),
                    """input_ids""": tf.TensorShape([None, None] ),
                    """attention_mask""": tf.TensorShape([None, None] ),
                    """token_type_ids""": tf.TensorShape([None, None] ),
                },
                tf.TensorShape([] ),
            ) , )

    def __magic_name__ ( self ) -> Any:
        """Return the wrapped tf.data dataset."""
        return self.dataset

    def __len__( self ) -> Dict:
        """Number of tokenized features."""
        return len(self.features )

    def __getitem__( self , lowerCAmelCase__ ) -> InputFeatures:
        # NOTE(review): indexes with unbound `i` instead of the parameter.
        return self.features[i]

    def __magic_name__ ( self ) -> Optional[int]:
        """Return the label list recorded at construction time."""
        return self.label_list
class snake_case__ ( _lowerCAmelCase ):
    """DataProcessor reading the HANS heuristics tsv files.

    NOTE(review): machine-mangled — every method is named ``__magic_name__``
    (later defs shadow earlier ones), ``_create_examples`` repeats the
    parameter name ``lowerCAmelCase__`` (SyntaxError), and its body reads
    unbound names (``set_type``, ``examples``).
    """

    def __magic_name__ ( self , lowerCAmelCase__ ) -> Optional[Any]:
        """Train split from heuristics_train_set.txt."""
        return self._create_examples(self._read_tsv(os.path.join(lowerCAmelCase__ , """heuristics_train_set.txt""" ) ) , """train""" )

    def __magic_name__ ( self , lowerCAmelCase__ ) -> int:
        """Dev split from heuristics_evaluation_set.txt."""
        return self._create_examples(self._read_tsv(os.path.join(lowerCAmelCase__ , """heuristics_evaluation_set.txt""" ) ) , """dev""" )

    def __magic_name__ ( self ) -> List[str]:
        """HANS three-way label set."""
        return ["contradiction", "entailment", "neutral"]

    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
        """Turn tsv rows into InputExample objects, skipping the header row."""
        __magic_name__ : Tuple = []
        for i, line in enumerate(lowerCAmelCase__ ):
            if i == 0:
                continue
            __magic_name__ : List[str] = """%s-%s""" % (set_type, line[0])
            __magic_name__ : Any = line[5]
            __magic_name__ : List[str] = line[6]
            __magic_name__ : List[Any] = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
            __magic_name__ : List[Any] = line[0]
            examples.append(InputExample(guid=lowerCAmelCase__ , text_a=lowerCAmelCase__ , text_b=lowerCAmelCase__ , label=lowerCAmelCase__ , pairID=lowerCAmelCase__ ) )
        return examples
def UpperCamelCase ( _A, _A, _A, _A, ):
    """Convert HANS InputExamples to tokenized InputFeatures.

    NOTE(review): machine-mangled — all four parameters share the name ``_A``
    (a SyntaxError) and the body reads unbound names (``tokenizer``,
    ``label_map``, ``features``, ``examples``).  Tokens left unchanged.
    """
    # Map label strings to integer indices.
    __magic_name__ : Union[str, Any] = {label: i for i, label in enumerate(_A )}

    __magic_name__ : Any = []
    for ex_index, example in tqdm.tqdm(enumerate(_A ), desc="""convert examples to features""" ):
        if ex_index % 10000 == 0:
            logger.info("""Writing example %d""" % (ex_index) )

        __magic_name__ : List[Any] = tokenizer(
            example.text_a, example.text_b, add_special_tokens=_A, max_length=_A, padding="""max_length""", truncation=_A, return_overflowing_tokens=_A, )

        # Unknown labels fall back to index 0.
        __magic_name__ : List[str] = label_map[example.label] if example.label in label_map else 0

        __magic_name__ : Any = int(example.pairID )

        features.append(InputFeatures(**_A, label=_A, pairID=_A ) )

    # Log the first few examples for debugging.
    for i, example in enumerate(examples[:5] ):
        logger.info("""*** Example ***""" )
        logger.info(f'guid: {example}' )
        logger.info(f'features: {features[i]}' )

    return features
__magic_name__: str = {
"hans": 3,
}
__magic_name__: List[Any] = {
"hans": HansProcessor,
}
| 138 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
__magic_name__: Any = None
__magic_name__: Dict = logging.get_logger(__name__)
__magic_name__: Any = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
__magic_name__: str = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
__magic_name__: Optional[Any] = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
__magic_name__: Optional[Any] = "▁"
# Segments (not really needed)
__magic_name__: List[Any] = 0
__magic_name__: Dict = 1
__magic_name__: List[str] = 2
__magic_name__: List[Any] = 3
__magic_name__: Optional[int] = 4
class snake_case__ ( _lowerCAmelCase ):
    """Fast (tokenizers-backed) XLNet tokenizer — mangled identifiers.

    NOTE(review): ``__init__`` repeats the parameter name ``lowerCAmelCase__``
    (a SyntaxError) and bodies read names that are never bound
    (``mask_token``, ``do_lower_case``, ``remove_space``, ``keep_accents``,
    ``vocab_file``, ``filename_prefix``, ``save_directory``,
    ``out_vocab_file``); ``token_ids_a`` is used for both the first and the
    second sequence.  All ``__magic_name__`` methods shadow each other.
    """

    lowercase__ : Dict = VOCAB_FILES_NAMES
    lowercase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    lowercase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # XLNet pads on the left.
    lowercase__ : List[str] = '''left'''
    lowercase__ : List[str] = XLNetTokenizer

    def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<sep>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<cls>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=["<eop>", "<eod>"] , **lowerCAmelCase__ , ) -> Tuple:
        """Wrap the fast tokenizer with XLNet's special tokens and flags."""
        # Mask token behave like a normal word, i.e. include the space before it
        __magic_name__ : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
        super().__init__(
            vocab_file=lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , remove_space=lowerCAmelCase__ , keep_accents=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , **lowerCAmelCase__ , )

        __magic_name__ : List[str] = 3
        __magic_name__ : str = do_lower_case
        __magic_name__ : Union[str, Any] = remove_space
        __magic_name__ : str = keep_accents
        __magic_name__ : Tuple = vocab_file
        __magic_name__ : List[Any] = False if not self.vocab_file else True

    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
        """Build model inputs: ``X <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        __magic_name__ : Any = [self.sep_token_id]
        __magic_name__ : Any = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
        """Token-type ids: zeros for A, ones for B, then the cls segment id (2)."""
        __magic_name__ : List[str] = [self.sep_token_id]
        __magic_name__ : Optional[Any] = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
        """Copy the slow sentencepiece model next to the saved fast tokenizer."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )

        if not os.path.isdir(lowerCAmelCase__ ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        __magic_name__ : List[str] = os.path.join(
            lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
            copyfile(self.vocab_file , lowerCAmelCase__ )

        return (out_vocab_file,)
| 138 | 1 |
"""simple docstring"""
from math import isqrt
def a_ ( lowerCamelCase ):
    """Trial-division primality test: True if no divisor in [2, isqrt(n)] divides n.

    Fix: the generator expression read an unbound name ``number`` (NameError
    at call time); it now tests the actual parameter.

    Note: with this test the empty range makes 0 and 1 report "prime";
    callers in this module only pass candidates >= 7.
    """
    return all(lowerCamelCase % divisor != 0 for divisor in range(2 , isqrt(lowerCamelCase ) + 1 ) )
def a_ ( lowerCamelCase = 10**6 ):
    """Count primes below *lowerCamelCase* of the form 3k(k+1)+1 (Project Euler 131).

    Candidates are differences of consecutive cubes: 7, 19, 37, 61, 91, ...
    (start at 7, then add 6*k at step k).

    Fixes vs. the original: the loop body read unbound mangled locals and
    called ``is_prime`` — a name this module never defines (both helpers were
    renamed ``a_`` and this definition shadows the first) — passing the
    *limit* instead of the candidate.  Primality is now tested inline on the
    candidate itself.

    >>> a_(100)
    4
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < lowerCamelCase:
        if all(prime_candidate % divisor != 0 for divisor in range(2, isqrt(prime_candidate) + 1)):
            primes_count += 1
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 98 | """simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
# NOTE(review): machine-mangled — every table below is assigned to the same
# name ``lowerCAmelCase__`` while the ``append`` calls read the intended
# names (``unet_conversion_map_layer``, ``sd_down_res_prefix``, ...), which
# are therefore unbound at run time.  Tokens left unchanged.

# Direct (stable-diffusion key, HF Diffusers key) renames for the UNet.
lowerCAmelCase__ : Optional[int] = [
    # (stable-diffusion, HF Diffusers)
    ('time_embed.0.weight', 'time_embedding.linear_1.weight'),
    ('time_embed.0.bias', 'time_embedding.linear_1.bias'),
    ('time_embed.2.weight', 'time_embedding.linear_2.weight'),
    ('time_embed.2.bias', 'time_embedding.linear_2.bias'),
    ('input_blocks.0.0.weight', 'conv_in.weight'),
    ('input_blocks.0.0.bias', 'conv_in.bias'),
    ('out.0.weight', 'conv_norm_out.weight'),
    ('out.0.bias', 'conv_norm_out.bias'),
    ('out.2.weight', 'conv_out.weight'),
    ('out.2.bias', 'conv_out.bias'),
]

# Sub-key renames applied inside resnet blocks.
lowerCAmelCase__ : str = [
    # (stable-diffusion, HF Diffusers)
    ('in_layers.0', 'norm1'),
    ('in_layers.2', 'conv1'),
    ('out_layers.0', 'norm2'),
    ('out_layers.3', 'conv2'),
    ('emb_layers.1', 'time_emb_proj'),
    ('skip_connection', 'conv_shortcut'),
]

lowerCAmelCase__ : int = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        lowerCAmelCase__ : Dict = F"""down_blocks.{i}.resnets.{j}."""
        lowerCAmelCase__ : Union[str, Any] = F"""input_blocks.{3*i + j + 1}.0."""
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            lowerCAmelCase__ : Tuple = F"""down_blocks.{i}.attentions.{j}."""
            lowerCAmelCase__ : int = F"""input_blocks.{3*i + j + 1}.1."""
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        lowerCAmelCase__ : Tuple = F"""up_blocks.{i}.resnets.{j}."""
        lowerCAmelCase__ : int = F"""output_blocks.{3*i + j}.0."""
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            lowerCAmelCase__ : Any = F"""up_blocks.{i}.attentions.{j}."""
            lowerCAmelCase__ : List[str] = F"""output_blocks.{3*i + j}.1."""
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        lowerCAmelCase__ : List[str] = F"""down_blocks.{i}.downsamplers.0.conv."""
        lowerCAmelCase__ : Union[str, Any] = F"""input_blocks.{3*(i+1)}.0.op."""
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        lowerCAmelCase__ : Dict = F"""up_blocks.{i}.upsamplers.0."""
        lowerCAmelCase__ : List[Any] = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

# Middle-block attention and resnet prefixes.
lowerCAmelCase__ : str = 'mid_block.attentions.0.'
lowerCAmelCase__ : Union[str, Any] = 'middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    lowerCAmelCase__ : int = F"""mid_block.resnets.{j}."""
    lowerCAmelCase__ : List[str] = F"""middle_block.{2*j}."""
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def a_ ( lowerCamelCase ):
    """Rename HF Diffusers UNet state-dict keys to Stable Diffusion keys.

    NOTE(review): the body looks machine-rewritten — every assignment target
    was collapsed to ``UpperCAmelCase__``, so names read later (``mapping``,
    ``new_state_dict``) and the globals it consumes (``unet_state_dict``,
    ``unet_conversion_map``, ``unet_conversion_map_resnet``,
    ``unet_conversion_map_layer``) are unbound here; as written this raises
    NameError. Restore the original variable names before use — TODO confirm
    against the upstream conversion script.
    """
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    UpperCAmelCase__ = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        UpperCAmelCase__ = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
            UpperCAmelCase__ = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
        UpperCAmelCase__ = v
    UpperCAmelCase__ = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
# NOTE(review): builds the SD<->HF key-prefix tables for the VAE. As in the
# UNet section above, the appended names (sd_down_prefix, hf_down_prefix, ...)
# are never bound because assignment targets were mangled to
# `lowerCAmelCase__` — NameError as written.
lowerCAmelCase__ : str = [
    # (stable-diffusion, HF Diffusers)
    ('nin_shortcut', 'conv_shortcut'),
    ('norm_out', 'conv_norm_out'),
    ('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        lowerCAmelCase__ : List[str] = F"""encoder.down_blocks.{i}.resnets.{j}."""
        lowerCAmelCase__ : List[Any] = F"""encoder.down.{i}.block.{j}."""
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        lowerCAmelCase__ : Dict = F"""down_blocks.{i}.downsamplers.0."""
        lowerCAmelCase__ : str = F"""down.{i}.downsample."""
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        lowerCAmelCase__ : int = F"""up_blocks.{i}.upsamplers.0."""
        lowerCAmelCase__ : str = F"""up.{3-i}.upsample."""
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        lowerCAmelCase__ : Dict = F"""decoder.up_blocks.{i}.resnets.{j}."""
        lowerCAmelCase__ : Optional[int] = F"""decoder.up.{3-i}.block.{j}."""
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    lowerCAmelCase__ : Any = F"""mid_block.resnets.{i}."""
    lowerCAmelCase__ : Any = F"""mid.block_{i+1}."""
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
# Attention-layer sub-key renames (SD uses q/k/v, HF uses query/key/value).
lowerCAmelCase__ : str = [
    # (stable-diffusion, HF Diffusers)
    ('norm.', 'group_norm.'),
    ('q.', 'query.'),
    ('k.', 'key.'),
    ('v.', 'value.'),
    ('proj_out.', 'proj_attn.'),
]
def a_ ( lowerCamelCase ):
    """Convert an HF Diffusers linear weight to SD 1x1-conv layout.

    Appends two singleton spatial dimensions, turning a ``(out, in)`` linear
    weight into a ``(out, in, 1, 1)`` conv2d weight. Works for any array-like
    exposing ``reshape``/``shape`` (torch.Tensor, numpy.ndarray).

    Bug fix: the original body referenced an undefined name ``w`` instead of
    the parameter, raising NameError on every call.
    """
    # convert HF linear weights to SD conv2d weights
    return lowerCamelCase.reshape(*lowerCamelCase.shape, 1, 1)
def a_ ( lowerCamelCase ):
    """Rename HF Diffusers VAE state-dict keys to Stable Diffusion keys and
    reshape mid-block attention weights to conv layout.

    NOTE(review): mangled like the UNet converter — ``mapping``,
    ``new_state_dict``, ``weights_to_convert`` and the global
    ``vae_state_dict``/``vae_conversion_map``/``vae_conversion_map_attn`` are
    unbound under these names; NameError as written. TODO restore originals.
    """
    UpperCAmelCase__ = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
        UpperCAmelCase__ = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
            UpperCAmelCase__ = v
    UpperCAmelCase__ = {v: vae_state_dict[k] for k, v in mapping.items()}
    UpperCAmelCase__ = ['q', 'k', 'v', 'proj_out']
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f'''mid.attn_1.{weight_name}.weight''' in k:
                print(f'''Reshaping {k} for SD format''' )
                UpperCAmelCase__ = reshape_weight_for_sd(lowerCamelCase )
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
# NOTE(review): SD (open_clip style) <-> HF text-encoder key fragments.
lowerCAmelCase__ : Optional[int] = [
    # (stable-diffusion, HF Diffusers)
    ('resblocks.', 'text_model.encoder.layers.'),
    ('ln_1', 'layer_norm1'),
    ('ln_2', 'layer_norm2'),
    ('.c_fc.', '.fc1.'),
    ('.c_proj.', '.fc2.'),
    ('.attn', '.self_attn'),
    ('ln_final.', 'transformer.text_model.final_layer_norm.'),
    ('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
    ('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
# NOTE(review): `textenc_conversion_lst` / `protected` are unbound here (the
# assignments above were mangled to `lowerCAmelCase__`) — NameError as written.
lowerCAmelCase__ : List[Any] = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
lowerCAmelCase__ : int = re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
lowerCAmelCase__ : Optional[int] = {'q': 0, 'k': 1, 'v': 2}
def a_ ( lowerCamelCase ):
    """Convert an HF v2 (OpenCLIP) text-encoder state dict to SD format,
    fusing separate q/k/v projection weights and biases into single
    ``in_proj`` tensors.

    NOTE(review): heavily mangled — ``k_pre``, ``capture_qkv_weight``,
    ``capture_qkv_bias``, ``new_state_dict`` are read but never bound (all
    assignment targets collapsed to ``UpperCAmelCase__``), and the lambdas
    reference an undefined ``m`` instead of their own parameter. NameError as
    written; restore the original names before use.
    """
    UpperCAmelCase__ = {}
    UpperCAmelCase__ = {}
    UpperCAmelCase__ = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith('.self_attn.q_proj.weight' )
            or k.endswith('.self_attn.k_proj.weight' )
            or k.endswith('.self_attn.v_proj.weight' )
        ):
            UpperCAmelCase__ = k[: -len('.q_proj.weight' )]
            UpperCAmelCase__ = k[-len('q_proj.weight' )]
            if k_pre not in capture_qkv_weight:
                UpperCAmelCase__ = [None, None, None]
            UpperCAmelCase__ = v
            continue
        if (
            k.endswith('.self_attn.q_proj.bias' )
            or k.endswith('.self_attn.k_proj.bias' )
            or k.endswith('.self_attn.v_proj.bias' )
        ):
            UpperCAmelCase__ = k[: -len('.q_proj.bias' )]
            UpperCAmelCase__ = k[-len('q_proj.bias' )]
            if k_pre not in capture_qkv_bias:
                UpperCAmelCase__ = [None, None, None]
            UpperCAmelCase__ = v
            continue
        UpperCAmelCase__ = textenc_pattern.sub(lambda lowerCamelCase : protected[re.escape(m.group(0 ) )] , lowerCamelCase )
        UpperCAmelCase__ = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
        UpperCAmelCase__ = textenc_pattern.sub(lambda lowerCamelCase : protected[re.escape(m.group(0 ) )] , lowerCamelCase )
        UpperCAmelCase__ = torch.cat(lowerCamelCase )
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
        UpperCAmelCase__ = textenc_pattern.sub(lambda lowerCamelCase : protected[re.escape(m.group(0 ) )] , lowerCamelCase )
        UpperCAmelCase__ = torch.cat(lowerCamelCase )
    return new_state_dict
def a_ ( lowerCamelCase ):
    """Convert a v1 (CLIP) text-encoder state dict to SD format.

    For v1 checkpoints the keys already match, so the conversion is the
    identity; the dict is returned unchanged.

    Bug fix: the original returned the undefined global ``text_enc_dict``
    instead of the parameter, raising NameError on every call.
    """
    return lowerCamelCase
# NOTE(review): CLI entry point — converts an HF Diffusers model directory
# (unet/vae/text_encoder) into a single Stable Diffusion checkpoint. The
# assignment targets are mangled (`lowerCAmelCase__`), so names read later
# (`parser`, `args`, `unet_path`, `unet_state_dict`, `state_dict`, ...) are
# unbound — NameError as written. Restore original names before running.
if __name__ == "__main__":
    lowerCAmelCase__ : Tuple = argparse.ArgumentParser()
    parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
    parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
    parser.add_argument(
        '--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
    )
    lowerCAmelCase__ : Optional[int] = parser.parse_args()
    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
    # Path for safetensors
    lowerCAmelCase__ : Tuple = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
    lowerCAmelCase__ : List[str] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
    lowerCAmelCase__ : int = osp.join(args.model_path, 'text_encoder', 'model.safetensors')
    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        lowerCAmelCase__ : Union[str, Any] = load_file(unet_path, device='cpu')
    else:
        lowerCAmelCase__ : str = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
        lowerCAmelCase__ : Dict = torch.load(unet_path, map_location='cpu')
    if osp.exists(vae_path):
        lowerCAmelCase__ : Optional[Any] = load_file(vae_path, device='cpu')
    else:
        lowerCAmelCase__ : Optional[int] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
        lowerCAmelCase__ : List[str] = torch.load(vae_path, map_location='cpu')
    if osp.exists(text_enc_path):
        lowerCAmelCase__ : Tuple = load_file(text_enc_path, device='cpu')
    else:
        lowerCAmelCase__ : Any = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
        lowerCAmelCase__ : Any = torch.load(text_enc_path, map_location='cpu')
    # Convert the UNet model
    lowerCAmelCase__ : Any = convert_unet_state_dict(unet_state_dict)
    lowerCAmelCase__ : Dict = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
    # Convert the VAE model
    lowerCAmelCase__ : List[Any] = convert_vae_state_dict(vae_state_dict)
    lowerCAmelCase__ : str = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    lowerCAmelCase__ : List[Any] = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        lowerCAmelCase__ : Tuple = {'transformer.' + k: v for k, v in text_enc_dict.items()}
        lowerCAmelCase__ : List[str] = convert_text_enc_state_dict_vaa(text_enc_dict)
        lowerCAmelCase__ : str = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
    else:
        lowerCAmelCase__ : Optional[Any] = convert_text_enc_state_dict(text_enc_dict)
        lowerCAmelCase__ : Optional[Any] = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
    # Put together new checkpoint
    lowerCAmelCase__ : List[Any] = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        lowerCAmelCase__ : int = {k: v.half() for k, v in state_dict.items()}
    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        lowerCAmelCase__ : List[str] = {'state_dict': state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 98 | 1 |
class a :
    """Edit-distance (Levenshtein) solver with top-down and bottom-up DP.

    NOTE(review): machine-mangled beyond safe repair —
    * all three methods share the name ``__lowerCamelCase``; later defs shadow
      earlier ones, and ``self.__min_dist_top_down_dp`` never exists;
    * the public-ish method signatures repeat the parameter name
      ``__lowercase``, which is a SyntaxError in Python;
    * locals (``m``, ``n``, ``worda``) and attributes (``self.dp``,
      ``self.worda``) are read but never bound under these names.
    Restore the original method/attribute names before use.
    """
    def __init__( self :Optional[Any] ):
        # Presumably: two word buffers plus the DP memo table — TODO confirm.
        snake_case__ : str = ''''''
        snake_case__ : Union[str, Any] = ''''''
        snake_case__ : int = []
    def __lowerCamelCase ( self :str ,__lowercase :int ,__lowercase :int ):
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.worda[m] == self.worda[n]:
                snake_case__ : Dict = self.__min_dist_top_down_dp(m - 1 ,n - 1 )
            else:
                snake_case__ : List[str] = self.__min_dist_top_down_dp(__lowercase ,n - 1 )
                snake_case__ : Optional[int] = self.__min_dist_top_down_dp(m - 1 ,__lowercase )
                snake_case__ : int = self.__min_dist_top_down_dp(m - 1 ,n - 1 )
                snake_case__ : Union[str, Any] = 1 + min(__lowercase ,__lowercase ,__lowercase )
        return self.dp[m][n]
    def __lowerCamelCase ( self :Optional[int] ,__lowercase :str ,__lowercase :str ):
        snake_case__ : Optional[Any] = worda
        snake_case__ : Any = worda
        snake_case__ : Optional[Any] = [[-1 for _ in range(len(__lowercase ) )] for _ in range(len(__lowercase ) )]
        return self.__min_dist_top_down_dp(len(__lowercase ) - 1 ,len(__lowercase ) - 1 )
    def __lowerCamelCase ( self :int ,__lowercase :str ,__lowercase :str ):
        snake_case__ : List[str] = worda
        snake_case__ : Tuple = worda
        snake_case__ : Optional[Any] = len(__lowercase )
        snake_case__ : Optional[int] = len(__lowercase )
        snake_case__ : int = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
        for i in range(m + 1 ):
            for j in range(n + 1 ):
                if i == 0: # first string is empty
                    snake_case__ : str = j
                elif j == 0: # second string is empty
                    snake_case__ : List[Any] = i
                elif worda[i - 1] == worda[j - 1]: # last characters are equal
                    snake_case__ : Optional[int] = self.dp[i - 1][j - 1]
                else:
                    snake_case__ : Optional[Any] = self.dp[i][j - 1]
                    snake_case__ : List[str] = self.dp[i - 1][j]
                    snake_case__ : Dict = self.dp[i - 1][j - 1]
                    snake_case__ : Dict = 1 + min(__lowercase ,__lowercase ,__lowercase )
        return self.dp[m][n]
# NOTE(review): interactive demo driver. `EditDistance`, `solver`, `Sa` are
# not defined under these names in this module (the class above is named `a`
# and both inputs are bound to the same mangled name `A__`) — NameError as
# written.
if __name__ == "__main__":
    A__ = EditDistance()
    print('''****************** Testing Edit Distance DP Algorithm ******************''')
    print()
    A__ = input('''Enter the first string: ''').strip()
    A__ = input('''Enter the second string: ''').strip()
    print()
    print(f"""The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}""")
    print(f"""The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}""")
    print()
    print('''*************** End of Testing Edit Distance DP Algorithm ***************''')
| 44 |
def _lowerCAmelCase ( __lowerCAmelCase ) -> int:
    """Return A(d): the number of digits of the smallest repunit (111...1)
    divisible by ``__lowerCAmelCase`` (Project Euler 129).

    Returns 0 when no repunit is divisible, i.e. when the divisor shares a
    factor with 10 (divisible by 2 or 5).

    Bug fix: the original body read the undefined names ``divisor`` and
    ``repunit`` instead of the parameter/locals, raising NameError.
    """
    if __lowerCAmelCase % 5 == 0 or __lowerCAmelCase % 2 == 0:
        return 0
    # Track the repunit modulo the divisor; append a digit each step until
    # the residue hits 0.
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % __lowerCAmelCase
        repunit_index += 1
    return repunit_index
def _lowerCAmelCase ( __lowerCAmelCase = 1000000 ) -> int:
    """Project Euler 129: smallest value of n coprime to 10 for which A(n)
    (length of the least repunit divisible by n) first exceeds the limit.

    Bug fixes: the original read the undefined names ``limit``/``divisor``
    instead of the parameter, and called ``least_divisible_repunit`` — a name
    this module never defines (its sibling is also mangled to
    ``_lowerCAmelCase`` and shadowed); the helper is therefore nested here.
    """

    def least_divisible_repunit(divisor: int) -> int:
        # A(d): digit count of the smallest repunit divisible by d; 0 when
        # d shares a factor with 10 (no repunit is then divisible).
        if divisor % 5 == 0 or divisor % 2 == 0:
            return 0
        repunit = 1
        repunit_index = 1
        while repunit:
            repunit = (10 * repunit + 1) % divisor
            repunit_index += 1
        return repunit_index

    # A(n) <= n, so the answer is at least limit; start just below it on an
    # odd number and step by 2 (even divisors can never work).
    divisor = __lowerCAmelCase - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= __lowerCAmelCase:
        divisor += 2
    return divisor
# NOTE(review): `solution` is not defined in this module — the function above
# is mangled to `_lowerCAmelCase` — so this guard raises NameError as written.
if __name__ == "__main__":
    print(f"""{solution() = }""")
| 44 | 1 |
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
lowerCAmelCase__ = datasets.logging.get_logger(__name__)
lowerCAmelCase__ = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
lowerCAmelCase__ = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
lowerCAmelCase__ = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
    """`datasets` Metric wrapper around the COMET MT-evaluation framework.

    NOTE(review): mangled — the last two method signatures repeat a parameter
    name (a SyntaxError in Python), the loaded checkpoint is never bound to
    ``self.scorer``, and ``gpus``/``sources``/``mean_score``/``scores`` etc.
    are read but never bound. Restore the original names before use.
    """
    def lowercase__ ( self ):
        """Declare the metric's metadata, citation and input features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "sources": datasets.Value("string" , id="sequence" ),
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Value("string" , id="sequence" ),
                } ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ] , )
    def lowercase__ ( self , snake_case__ ):
        """Download and load the configured COMET scorer checkpoint."""
        if self.config_name == "default":
            lowerCAmelCase : Union[str, Any] = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da" ) )
        else:
            lowerCAmelCase : str = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
    def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=False ):
        """Score (source, hypothesis, reference) triples with the loaded model."""
        if gpus is None:
            lowerCAmelCase : int = 1 if torch.cuda.is_available() else 0
        lowerCAmelCase : str = {"src": sources, "mt": predictions, "ref": references}
        lowerCAmelCase : Tuple = [dict(zip(snake_case__ , snake_case__ ) ) for t in zip(*data.values() )]
        lowerCAmelCase , lowerCAmelCase : Dict = self.scorer.predict(snake_case__ , gpus=snake_case__ , progress_bar=snake_case__ )
        return {"mean_score": mean_score, "scores": scores}
| 108 |
"""simple docstring"""
def lowercase__ ( _UpperCAmelCase ) -> None:
    """Print a centred Pascal's triangle with ``_UpperCAmelCase`` rows.

    Bug fixes: the original read the undefined names ``triangle`` and
    ``num_rows``, and called ``generate_pascal_triangle``, which this module
    does not define under that name (its generators are also mangled); the
    triangle is therefore built by a local helper.
    """

    def _generate(num_rows: int) -> list[list[int]]:
        # Row r has r+1 entries; interior entries sum the two entries above.
        triangle: list[list[int]] = []
        for row_idx in range(num_rows):
            row = [1] * (row_idx + 1)
            for col in range(1, row_idx):
                row[col] = triangle[row_idx - 1][col - 1] + triangle[row_idx - 1][col]
            triangle.append(row)
        return triangle

    triangle = _generate(_UpperCAmelCase)
    for row_idx in range(_UpperCAmelCase):
        # Print left spaces (centres the triangle).
        for _ in range(_UpperCAmelCase - row_idx - 1):
            print(end=' ')
        # Print row values: single space between entries, none after the last.
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=' ')
            else:
                print(triangle[row_idx][col_idx], end='')
        print()
def lowercase__ ( _UpperCAmelCase ) -> list[list[int]]:
    """Build Pascal's triangle with ``_UpperCAmelCase`` rows as a list of rows.

    Raises:
        TypeError: if the input is not an int.
        ValueError: if the input is negative.

    Bug fixes: ``isinstance(x, x)`` always raised TypeError (the second
    argument must be a type), and ``num_rows``/``triangle``/``current_row``
    were read but never bound (assignment targets were mangled).
    """
    if not isinstance(_UpperCAmelCase, int):
        raise TypeError('The input value of \'num_rows\' should be \'int\'' )
    if _UpperCAmelCase == 0:
        return []
    elif _UpperCAmelCase < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0' )
    triangle: list[list[int]] = []
    for current_row_idx in range(_UpperCAmelCase):
        current_row = [-1] * (current_row_idx + 1)
        # First and last entries of every row are 1.
        current_row[0], current_row[-1] = 1, 1
        for current_col_idx in range(1, current_row_idx):
            # Interior entries are the sum of the two entries above.
            current_row[current_col_idx] = (
                triangle[current_row_idx - 1][current_col_idx - 1]
                + triangle[current_row_idx - 1][current_col_idx]
            )
        triangle.append(current_row)
    return triangle
def lowercase__ ( triangle , current_row_idx ) -> list[int]:
    """Build row ``current_row_idx`` of Pascal's triangle from the rows in
    ``triangle`` (which must already contain all previous rows).

    Bug fixes: the original signature repeated the same mangled parameter
    name twice — a SyntaxError in Python — so distinct names are restored
    here; the body also read undefined names and called a helper this module
    does not define under that name, so the element computation is inlined.
    """
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        # Interior entries are the sum of the two entries above.
        current_row[current_col_idx] = (
            triangle[current_row_idx - 1][current_col_idx - 1]
            + triangle[current_row_idx - 1][current_col_idx]
        )
    return current_row
def lowercase__ ( triangle , current_row , current_row_idx , current_col_idx ) -> None:
    """Fill ``current_row[current_col_idx]`` in place with the sum of the two
    elements above it in ``triangle``.

    Bug fix: the original signature repeated the same mangled parameter name
    four times — a SyntaxError in Python — and the body read undefined names;
    distinct parameter names are restored here.
    """
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def lowercase__ ( _UpperCAmelCase ) -> list[list[int]]:
    """Build Pascal's triangle with ``_UpperCAmelCase`` rows, computing only
    the first half of each row and mirroring it (rows are symmetric).

    Raises:
        TypeError: if the input is not an int.
        ValueError: if the input is negative.

    Bug fixes: ``isinstance(x, x)`` always raised TypeError, and
    ``num_rows``/``result``/``temp_row``/``row_first_half`` etc. were read but
    never bound (assignment targets were mangled).
    """
    if not isinstance(_UpperCAmelCase, int):
        raise TypeError('The input value of \'num_rows\' should be \'int\'' )
    if _UpperCAmelCase == 0:
        return []
    elif _UpperCAmelCase < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0' )
    result: list[list[int]] = [[1]]
    for row_index in range(1, _UpperCAmelCase):
        # Pad the previous row with zeros so adjacent sums give the new row.
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Number of distinct elements in a symmetric row of this length.
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        # Mirror the first half (dropping the middle element for odd rows).
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        result.append(row_first_half + row_second_half)
    return result
def lowercase__ ( ) -> None:
    """Benchmark the two Pascal-triangle generators with ``timeit``.

    NOTE(review): mangled — the nested helper repeats a parameter name (a
    SyntaxError in Python); ``func``/``value``/``call``/``timing`` and the
    benchmarked names ``generate_pascal_triangle`` /
    ``generate_pascal_triangle_optimized`` are all unbound under these names
    in this module. Restore original names before use.
    """
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(_UpperCAmelCase , _UpperCAmelCase ) -> None:
        lowercase : Dict = f'''{func.__name__}({value})'''
        lowercase : Optional[Any] = timeit(f'''__main__.{call}''' , setup='import __main__' )
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f'''{call:38} -- {timing:.4f} seconds''' )
    for value in range(15 ): # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(_UpperCAmelCase , _UpperCAmelCase )
    print()
# Run doctests, then the benchmark, when executed directly.
# NOTE(review): `benchmark` is not defined under that name in this module
# (the benchmark function above is mangled to `lowercase__`).
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()
| 255 | 0 |
"""simple docstring"""
# Notebook-docs helper constants (Korean localisation): the raw install cell
# text, the notebook's first code cell, and placeholder substitutions used
# when rendering doc examples. NOTE(review): all three are bound to the same
# mangled name `__A`, and `INSTALL_CONTENT` is read but never bound — restore
# the original constant names (INSTALL_CONTENT, notebook_first_cells,
# black_avoid_patterns) before use.
__A = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
__A = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
__A = {
    """{processor_class}""": """FakeProcessorClass""",
    """{model_class}""": """FakeModelClass""",
    """{object_class}""": """FakeObjectClass""",
}
| 352 |
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def __A (_SCREAMING_SNAKE_CASE = "" ) ->dict[str, float]:
    """Scrape the IMDb Top 250 chart and return {movie title: rating}.

    NOTE(review): mangled — the default-URL expression reads the undefined
    name ``url``, and ``soup``/``title``/``rating`` are read but never bound
    (assignment targets collapsed to ``lowerCAmelCase__``); NameError as
    written. Also requires the third-party ``requests`` and BeautifulSoup
    packages imported at the top of this file.
    """
    lowerCAmelCase__ :Optional[Any] = url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
    lowerCAmelCase__ :str = BeautifulSoup(requests.get(_SCREAMING_SNAKE_CASE ).text , 'html.parser' )
    lowerCAmelCase__ :List[Any] = soup.find_all('td' , attrs='titleColumn' )
    lowerCAmelCase__ :Optional[int] = soup.find_all('td' , class_='ratingColumn imdbRating' )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    }
def __A (_SCREAMING_SNAKE_CASE = "IMDb_Top_250_Movies.csv" ) ->None:
    """Write the scraped IMDb Top 250 ratings to a CSV file.

    NOTE(review): mangled — ``movies``/``writer`` are read but never bound,
    and ``get_imdb_top_aaa_movies`` is not defined in this module (the scraper
    above is also mangled to ``__A``); NameError as written.
    """
    lowerCAmelCase__ :Any = get_imdb_top_aaa_movies()
    with open(_SCREAMING_SNAKE_CASE , 'w' , newline='' ) as out_file:
        lowerCAmelCase__ :Dict = csv.writer(_SCREAMING_SNAKE_CASE )
        writer.writerow(['Movie title', 'IMDb rating'] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )
# NOTE(review): `write_movies` is likewise not defined under that name here.
if __name__ == "__main__":
    write_movies()
| 254 | 0 |
'''simple docstring'''
class _lowercase :
    '''Fixed-capacity circular (ring) queue backed by a Python list.

    NOTE(review): mangled — `__init__` never assigns `self.n` / `self.array` /
    `self.front` / `self.rear` / `self.size` (targets collapsed to
    `__lowerCAmelCase`), four methods share the name `a` so later defs shadow
    earlier ones, and `self.is_empty()` / `data` / `temp` are unbound under
    these names. Restore the original attribute/method names before use.
    '''
    def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ) -> Any:
        __lowerCAmelCase = n
        __lowerCAmelCase = [None] * self.n
        __lowerCAmelCase = 0 # index of the first element
        __lowerCAmelCase = 0
        __lowerCAmelCase = 0
    def __len__( self : Union[str, Any] ) -> int:
        # Number of stored elements, not the capacity.
        return self.size
    def a ( self : str ) -> bool:
        return self.size == 0
    def a ( self : List[Any] ) -> Optional[int]:
        # Peek at the front element without removing it.
        return False if self.is_empty() else self.array[self.front]
    def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[Any]:
        if self.size >= self.n:
            raise Exception("""QUEUE IS FULL""" )
        __lowerCAmelCase = data
        __lowerCAmelCase = (self.rear + 1) % self.n
        self.size += 1
        return self
    def a ( self : int ) -> Dict:
        if self.size == 0:
            raise Exception("""UNDERFLOW""" )
        __lowerCAmelCase = self.array[self.front]
        __lowerCAmelCase = None
        __lowerCAmelCase = (self.front + 1) % self.n
        self.size -= 1
        return temp
| 229 | '''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _lowercase ( UpperCAmelCase__ ):
    '''Unit tests for `datasets.Dataset.from_list`.

    NOTE(review): mangled — all test methods share the name `a` (later defs
    shadow earlier ones), the base class `UpperCAmelCase__` is not defined
    here (presumably `TestCase`), and locals (`dset`, `example_records`,
    `dset_from_dict`) are read but never bound. Also depends on the
    third-party `datasets` package.
    '''
    def a ( self : int ) -> Optional[Any]:
        # Fixture: four records sharing the same two columns.
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]
    def a ( self : List[Any] ) -> Any:
        __lowerCAmelCase = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
        return Dataset.from_dict(SCREAMING_SNAKE_CASE__ )
    def a ( self : List[Any] ) -> Tuple:
        __lowerCAmelCase = self._create_example_records()
        __lowerCAmelCase = Dataset.from_list(SCREAMING_SNAKE_CASE__ )
        self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
        for i, r in enumerate(SCREAMING_SNAKE_CASE__ ):
            self.assertDictEqual(SCREAMING_SNAKE_CASE__ , example_records[i] )
    def a ( self : Tuple ) -> List[str]:
        __lowerCAmelCase = self._create_example_records()
        __lowerCAmelCase = Dataset.from_list(SCREAMING_SNAKE_CASE__ )
        __lowerCAmelCase = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
        self.assertEqual(dset.info , dset_from_dict.info )
    def a ( self : List[str] ) -> List[str]: # checks what happens with missing columns
        __lowerCAmelCase = [{"""col_1""": 1}, {"""col_2""": """x"""}]
        __lowerCAmelCase = Dataset.from_list(SCREAMING_SNAKE_CASE__ )
        self.assertDictEqual(dset[0] , {"""col_1""": 1} )
        self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
    def a ( self : Dict ) -> Optional[int]: # checks if the type can be inferred from the second record
        __lowerCAmelCase = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
        __lowerCAmelCase = Dataset.from_list(SCREAMING_SNAKE_CASE__ )
        self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
    def a ( self : Optional[Any] ) -> Tuple:
        __lowerCAmelCase = Dataset.from_list([] )
        self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 0 )
        self.assertListEqual(dset.column_names , [] )
| 229 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__a :List[str] = TypeVar('T')
class _a ( Generic[T] ):
    """Singly-linked-list node holding one value of type T.

    NOTE(review): mangled — ``__init__`` assigns to the throwaway local ``A_``
    instead of ``self.data`` / ``self.next``, yet ``__str__`` reads
    ``self.data``; AttributeError as written.
    """
    def __init__( self : Any , UpperCAmelCase : T ):
        A_ = data
        A_ = None
    def __str__( self : Union[str, Any] ):
        return f'''{self.data}'''
class _a ( Generic[T] ):
    """Linked-list based LIFO stack of T.

    NOTE(review): mangled — it shadows the Node class above (both are named
    ``_a``), ``self.top`` is never assigned (targets collapsed to ``A_``),
    methods push/pop/peek/clear all share the name ``__A`` so later defs
    shadow earlier ones, and ``Node`` / ``self.is_empty`` / ``node`` /
    ``pop_node`` are unbound under these names. Restore original names.
    """
    def __init__( self : Union[str, Any] ):
        A_ = None
    def __iter__( self : Dict ):
        # Walk from the top of the stack down the `next` chain.
        A_ = self.top
        while node:
            yield node.data
            A_ = node.next
    def __str__( self : Tuple ):
        return "->".join([str(UpperCAmelCase ) for item in self] )
    def __len__( self : Dict ):
        return len(tuple(iter(self ) ) )
    def __A ( self : Optional[int] ):
        return self.top is None
    def __A ( self : Dict , UpperCAmelCase : T ):
        A_ = Node(UpperCAmelCase )
        if not self.is_empty():
            A_ = self.top
        A_ = node
    def __A ( self : Any ):
        if self.is_empty():
            raise IndexError("pop from empty stack" )
        assert isinstance(self.top , UpperCAmelCase )
        A_ = self.top
        A_ = self.top.next
        return pop_node.data
    def __A ( self : Union[str, Any] ):
        if self.is_empty():
            raise IndexError("peek from empty stack" )
        assert self.top is not None
        return self.top.data
    def __A ( self : List[str] ):
        A_ = None
# Run the module's doctests when executed directly.
# NOTE(review): trailing "| 351 |" is dataset-extraction junk fused onto the
# line — a SyntaxError as written; remove it.
if __name__ == "__main__":
    from doctest import testmod
    testmod() | 351 |
# Repository hygiene check: flags files whose paths contain uppercase letters,
# spaces, hyphens, or that sit outside any directory, then exits non-zero if
# any were found. NOTE(review): mangled — every list is bound to the same name
# `__a` while `filepaths`, `upper_files`, `space_files`, `hyphen_files`,
# `nodir_files`, `bad_files` are read but never bound; NameError as written.
# The trailing "| 329 | 0 |" on the last line is fused extraction junk
# (SyntaxError) — remove it.
import os
try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths # type: ignore
__a :int = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__a :Any = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(F"{len(upper_files)} files contain uppercase characters:")
    print('\n'.join(upper_files) + '\n')
__a :Tuple = [file for file in filepaths if ' ' in file]
if space_files:
    print(F"{len(space_files)} files contain space characters:")
    print('\n'.join(space_files) + '\n')
__a :str = [file for file in filepaths if '-' in file]
if hyphen_files:
    print(F"{len(hyphen_files)} files contain hyphen characters:")
    print('\n'.join(hyphen_files) + '\n')
__a :List[str] = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(F"{len(nodir_files)} files are not in a directory:")
    print('\n'.join(nodir_files) + '\n')
__a :Any = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys
    sys.exit(bad_files) | 329 | 0 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : int = 200 ) -> int:
    """Count the ways to make ``lowercase`` pence from standard UK coins
    (Project Euler 31); defaults to 200 pence (two pounds).

    Classic unbounded-knapsack DP: iterate coins in the outer loop so each
    combination is counted once regardless of coin order.

    Bug fixes: the original read the undefined names ``pence``, ``coins`` and
    ``number_of_ways`` (assignment targets were mangled), and started the
    inner loop at the target amount instead of the coin value.
    """
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (lowercase + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, lowercase + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[lowercase]
# NOTE(review): `solution` is not defined in this module (the function above
# is mangled to `_lowerCamelCase`) — NameError as written.
if __name__ == "__main__":
    assert solution(2_00) == 7_36_82
| 63 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : bytes ) -> str:
    """Encode ``lowercase`` as an uppercase Base16 (hex) string, RFC 4648
    style: two uppercase hex digits per input byte.

    Bug fix: the original hexed the whole bytes object (``hex(lowercase)`` —
    a TypeError) instead of each individual byte.
    """
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(lowercase)])
def _lowerCamelCase ( lowercase : str ) -> bytes:
    """Decode an uppercase Base16 (hex) string back to bytes.

    Raises:
        ValueError: if the input has an odd number of digits or contains
            characters outside the uppercase Base16 alphabet.

    Bug fix: the final comprehension indexed an undefined name ``data``
    instead of the parameter.
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(lowercase) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(lowercase) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(lowercase[i] + lowercase[i + 1], 16) for i in range(0, len(lowercase), 2))
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 63 | 1 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
# Substring renames applied to CLAP checkpoint keys to match the HF naming.
# NOTE(review): the class decorator/functions below read
# `KEYS_TO_MODIFY_MAPPING` and `processor`, but both constants here are bound
# to the mangled name `lowerCamelCase__` — restore the original names. The
# module-level `from_pretrained` call also downloads a model at import time.
lowerCamelCase__ : Optional[int] = {
    'text_branch': 'text_model',
    'audio_branch': 'audio_model.audio_encoder',
    'attn': 'attention.self',
    'self.proj': 'output.dense',
    'attention.self_mask': 'attn_mask',
    'mlp.fc1': 'intermediate.dense',
    'mlp.fc2': 'output.dense',
    'norm1': 'layernorm_before',
    'norm2': 'layernorm_after',
    'bn0': 'batch_norm',
}
lowerCamelCase__ : str = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
# NOTE(review): mangled — the signature repeats `__UpperCAmelCase` (a
# SyntaxError in Python), `enable_fusion` is read but never bound, and the
# return reads unbound `model`/`model_cfg`. Restore original parameter and
# local names. Builds a CLAP model (third-party `CLAP` package).
def UpperCAmelCase_ ( __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any]=False ) -> Dict:
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = create_model(
        'HTSAT-tiny' , 'roberta' , __UpperCAmelCase , precision='fp32' , device='cuda:0' if torch.cuda.is_available() else 'cpu' , enable_fusion=__UpperCAmelCase , fusion_type='aff_2d' if enable_fusion else None , )
    return model, model_cfg
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple ) -> List[Any]:
    """Rename CLAP checkpoint keys to HF Clap layout and split fused qkv
    projections into separate query/key/value tensors.

    NOTE(review): mangled — ``state_dict``/``model_state_dict`` and the regex
    locals are read but never bound (targets collapsed to
    ``SCREAMING_SNAKE_CASE_``); NameError as written. Separately, the test
    ``if "audio" and "qkv" in key`` only checks for "qkv" (the left operand is
    a non-empty string, always truthy) — presumably it was meant to be
    ``"audio" in key and "qkv" in key``; TODO confirm.
    """
    SCREAMING_SNAKE_CASE_ = {}
    SCREAMING_SNAKE_CASE_ = r'.*sequential.(\d+).*'
    SCREAMING_SNAKE_CASE_ = r'.*_projection.(\d+).*'
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                SCREAMING_SNAKE_CASE_ = key.replace(__UpperCAmelCase , __UpperCAmelCase )
        if re.match(__UpperCAmelCase , __UpperCAmelCase ):
            # replace sequential layers with list
            SCREAMING_SNAKE_CASE_ = re.match(__UpperCAmelCase , __UpperCAmelCase ).group(1 )
            SCREAMING_SNAKE_CASE_ = key.replace(f"sequential.{sequential_layer}." , f"layers.{int(__UpperCAmelCase )//3}.linear." )
        elif re.match(__UpperCAmelCase , __UpperCAmelCase ):
            SCREAMING_SNAKE_CASE_ = int(re.match(__UpperCAmelCase , __UpperCAmelCase ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            SCREAMING_SNAKE_CASE_ = 1 if projecton_layer == 0 else 2
            SCREAMING_SNAKE_CASE_ = key.replace(f"_projection.{projecton_layer}." , f"_projection.linear{transformers_projection_layer}." )
        if "audio" and "qkv" in key:
            # split qkv into query key and value
            SCREAMING_SNAKE_CASE_ = value
            SCREAMING_SNAKE_CASE_ = mixed_qkv.size(0 ) // 3
            SCREAMING_SNAKE_CASE_ = mixed_qkv[:qkv_dim]
            SCREAMING_SNAKE_CASE_ = mixed_qkv[qkv_dim : qkv_dim * 2]
            SCREAMING_SNAKE_CASE_ = mixed_qkv[qkv_dim * 2 :]
            SCREAMING_SNAKE_CASE_ = query_layer
            SCREAMING_SNAKE_CASE_ = key_layer
            SCREAMING_SNAKE_CASE_ = value_layer
        else:
            SCREAMING_SNAKE_CASE_ = value
    return model_state_dict
# NOTE(review): mangled — the signature repeats `__UpperCAmelCase` four times
# (a SyntaxError in Python), and `clap_model`, `rename_state_dict`,
# `enable_fusion`, `transformers_config`, `model` are unbound under these
# names. End-to-end CLAP -> HF Clap checkpoint conversion; restore original
# names before use.
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any]=False ) -> List[Any]:
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = init_clap(__UpperCAmelCase , enable_fusion=__UpperCAmelCase )
    clap_model.eval()
    SCREAMING_SNAKE_CASE_ = clap_model.state_dict()
    SCREAMING_SNAKE_CASE_ = rename_state_dict(__UpperCAmelCase )
    SCREAMING_SNAKE_CASE_ = ClapConfig()
    SCREAMING_SNAKE_CASE_ = enable_fusion
    SCREAMING_SNAKE_CASE_ = ClapModel(__UpperCAmelCase )
    # ignore the spectrogram embedding layer
    model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase )
    model.save_pretrained(__UpperCAmelCase )
    transformers_config.save_pretrained(__UpperCAmelCase )
# CLI entry point. NOTE(review): `parser`/`args` are unbound (targets mangled
# to `lowerCamelCase__`), `convert_clap_checkpoint` is not defined under that
# name here, and the trailing "| 210 |" is fused extraction junk (SyntaxError)
# — remove it and restore original names before running.
if __name__ == "__main__":
    lowerCamelCase__ : Optional[int] = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
    lowerCamelCase__ : List[str] = parser.parse_args()
    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion) | 210 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def UpperCAmelCase_(method):
    """Decorator for instance methods that runs the accelerate ``_hf_hook``
    pre-forward hook (e.g. to move offloaded weights onto the device) before
    the wrapped method executes.

    Returns the method unchanged when accelerate is unavailable or too old.
    """
    import functools

    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    # BUG FIX: compare the *installed accelerate version*; the previous code
    # passed the decorated method itself to version.parse(), which raises.
    if version.parse(accelerate_version) < version.parse('0.17.0'):
        return method

    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        # Trigger the accelerate pre-forward hook if one is attached to self.
        if hasattr(self, '_hf_hook') and hasattr(self._hf_hook, 'pre_forward'):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    """Typed result of ``bwt_transform``: the transformed string plus the index
    of the original string among the sorted rotations (needed for inversion)."""

    # Last column of the sorted rotation matrix.
    bwt_string: str
    # Position of the original string in the sorted rotations.
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    """Return every cyclic rotation of *s* (including *s* itself).

    Raises:
        TypeError: If *s* is not a ``str``.
    """
    # BUG FIX: previously checked isinstance(s, s), which raises TypeError
    # for every input instead of validating the type.
    if not isinstance(s, str):
        raise TypeError('The parameter s type must be str.')
    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> BWTTransformDict:
    """Compute the Burrows-Wheeler transform of *s*.

    Returns:
        A dict with ``bwt_string`` (last column of the sorted rotation matrix)
        and ``idx_original_string`` (index of *s* within the sorted rotations).

    Raises:
        TypeError: If *s* is not a ``str``.
        ValueError: If *s* is empty.
    """
    if not isinstance(s, str):
        raise TypeError('The parameter s type must be str.')
    if not s:
        raise ValueError('The parameter s must not be empty.')

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Invert the Burrows-Wheeler transform by the naive table method.

    Repeatedly prepends the BWT column to each row and re-sorts; after
    ``len(bwt_string)`` passes the table contains every full rotation in
    sorted order, and the row at *idx_original_string* is the original text.

    Raises:
        TypeError: If *bwt_string* is not a ``str`` or the index cannot be cast to int.
        ValueError: If *bwt_string* is empty or the index is out of range.
    """
    if not isinstance(bwt_string, str):
        raise TypeError('The parameter bwt_string type must be str.')
    if not bwt_string:
        raise ValueError('The parameter bwt_string must not be empty.')
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            'The parameter idx_original_string type must be int or passive'
            ' of cast to int.'
        )
    if idx_original_string < 0:
        raise ValueError('The parameter idx_original_string must not be lower than 0.')
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            'The parameter idx_original_string must be lower than len(bwt_string).'
        )

    ordered_rotations = [''] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    # Interactive demo: transform a user-supplied string and invert it again.
    entry_msg = 'Provide a string that I will generate its BWT transform: '
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result['bwt_string'], result['idx_original_string'])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
| 264 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that records every trainer event it receives, in order.

    All hooks had been renamed to one duplicated method name (so only the last
    definition survived); the names below are the ones the ``TrainerCallback``
    protocol dispatches to, and ``MyTestTrainerCallback`` is the name the test
    class in this file references.
    """

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append('on_init_end')

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append('on_train_begin')

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append('on_train_end')

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append('on_epoch_begin')

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append('on_epoch_end')

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append('on_step_begin')

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append('on_step_end')

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append('on_evaluate')

    def on_predict(self, args, state, control, **kwargs):
        self.events.append('on_predict')

    def on_save(self, args, state, control, **kwargs):
        self.events.append('on_save')

    def on_log(self, args, state, control, **kwargs):
        self.events.append('on_log')

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append('on_prediction_step')
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    """Tests callback registration and the exact order of events fired by ``Trainer``."""

    def setUp(self):
        # Fresh output directory per test; removed again in tearDown.
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        """Build a tiny regression ``Trainer`` writing into the per-test temp dir."""
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        """Assert two callback lists match up to ordering and class/instance mixing."""
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        """Reconstruct the event sequence the trainer should have fired."""
        expected_events = ['on_init_end', 'on_train_begin']
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ['on_prediction_step'] * len(trainer.get_eval_dataloader()) + ['on_log', 'on_evaluate']
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append('on_epoch_begin')
            for _ in range(train_dl_len):
                step += 1
                expected_events += ['on_step_begin', 'on_step_end']
                if step % trainer.args.logging_steps == 0:
                    expected_events.append('on_log')
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append('on_save')
            expected_events.append('on_epoch_end')
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ['on_log', 'on_train_end']
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action='ignore', category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy='steps')
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy='epoch')
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy='steps',
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch('transformers.trainer_callback.logger.warning') as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 264 | 1 |
'''simple docstring'''
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """Return True when *next_ver* can extend *path* at position *curr_ind*.

    The candidate vertex must be adjacent to the last vertex placed in the
    path and must not already appear in the path.
    """
    # 1. Validate that current vertex and next vertex are adjacent
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)
def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Recursive backtracking helper for ``hamilton_cycle``.

    Tries every vertex at position *curr_ind* of *path*; returns True once a
    full Hamiltonian cycle has been placed (mutating *path* in place).
    """
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Find a Hamiltonian cycle in *graph* (adjacency matrix) starting and
    ending at *start_index*; returns the vertex sequence or an empty list."""
    # Initialize path with -1, meaning that we have not visited them yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
| 367 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class lowerCamelCase:
    """Undirected weighted graph that computes a minimum spanning tree with
    Boruvka's algorithm (progress is printed, not returned)."""

    def __init__(self, num_of_nodes: int) -> None:
        """Initialize a graph with *num_of_nodes* vertices and no edges."""
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        # Maps each node to its current component representative.
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Add an undirected edge (u_node, v_node) with the given weight."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Return the representative (root) of the component containing *u_node*."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Recompute every node's representative after *u_node*'s component changed."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merge the components rooted at *u_node* and *v_node* (union by size)."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Compute and print a minimum spanning tree using Boruvka's algorithm."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            # Find the cheapest edge leaving each component.
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            # Add every component's cheapest edge to the MST and merge.
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def __lowerCamelCase ( ) -> None:
    """simple docstring"""
    # NOTE(review): this function's body appears to have been stripped to a bare
    # docstring by an automated rewrite; presumably it once built a demo graph
    # and ran its MST routine — confirm against the upstream source.


if __name__ == "__main__":
    # Run any doctests defined in this module.
    import doctest

    doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the LXMERT model: submodules are only imported when
# first accessed (or eagerly under TYPE_CHECKING for static analyzers).  The
# previous code assigned the dict and every optional list to one clobbered name,
# overwriting the structure and leaving `_import_structure` undefined at the end.
_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lxmert"] = [
        "LxmertEncoder",
        "LxmertForPreTraining",
        "LxmertForQuestionAnswering",
        "LxmertModel",
        "LxmertPreTrainedModel",
        "LxmertVisualFeatureEncoder",
        "LxmertXLayer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_lxmert"] = [
        "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLxmertForPreTraining",
        "TFLxmertMainLayer",
        "TFLxmertModel",
        "TFLxmertPreTrainedModel",
        "TFLxmertVisualFeatureEncoder",
    ]

if TYPE_CHECKING:
    from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
    from .tokenization_lxmert import LxmertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_lxmert_fast import LxmertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lxmert import (
            LxmertEncoder,
            LxmertForPreTraining,
            LxmertForQuestionAnswering,
            LxmertModel,
            LxmertPreTrainedModel,
            LxmertVisualFeatureEncoder,
            LxmertXLayer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_lxmert import (
            TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLxmertForPreTraining,
            TFLxmertMainLayer,
            TFLxmertModel,
            TFLxmertPreTrainedModel,
            TFLxmertVisualFeatureEncoder,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 334 |
'''simple docstring'''
from manim import *
class SCREAMING_SNAKE_CASE( A__ ):
    # NOTE(review): the base class ``A__`` and the repeated ``__snake_case``
    # arguments below are unresolved names — the original identifiers (manim
    # direction constants such as RIGHT/DOWN and the mobjects being arranged or
    # animated) appear to have been clobbered by an automated rewrite.  The
    # local names that *are* referenced later (cpu, gpu, model, checkpoint,
    # key_text, cpu_targs, first_animations, second_animations, ...) indicate
    # this is a shard-loading animation; confirm against the upstream script
    # before attempting to run or repair it.
    """Manim scene illustrating a sharded checkpoint being loaded into an empty model."""

    def A ( self : Union[str, Any] ) -> List[str]:
        # --- build CPU / GPU / Model memory-block mobjects -------------------
        UpperCAmelCase : Optional[Any] = Rectangle(height=0.5 , width=0.5 )
        UpperCAmelCase : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        UpperCAmelCase : Tuple = [mem.copy() for i in range(6 )]
        UpperCAmelCase : Optional[int] = [mem.copy() for i in range(6 )]
        UpperCAmelCase : Dict = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
        UpperCAmelCase : Any = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
        UpperCAmelCase : Union[str, Any] = VGroup(__snake_case , __snake_case ).arrange(__snake_case , buff=0 )
        UpperCAmelCase : Optional[Any] = Text('''CPU''' , font_size=24 )
        UpperCAmelCase : Union[str, Any] = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__snake_case )
        UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(4 )]
        UpperCAmelCase : Union[str, Any] = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
        UpperCAmelCase : List[str] = Text('''GPU''' , font_size=24 )
        UpperCAmelCase : Dict = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
        gpu.move_to([-1, -1, 0] )
        self.add(__snake_case )
        UpperCAmelCase : int = [mem.copy() for i in range(6 )]
        UpperCAmelCase : Union[str, Any] = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
        UpperCAmelCase : List[str] = Text('''Model''' , font_size=24 )
        UpperCAmelCase : Tuple = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
        model.move_to([3, -1.0, 0] )
        self.add(__snake_case )
        # --- place a small highlight target on each model block --------------
        UpperCAmelCase : Any = []
        for i, rect in enumerate(__snake_case ):
            rect.set_stroke(__snake_case )
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            UpperCAmelCase : Dict = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__snake_case , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__snake_case )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(cpu_targs[0] , direction=__snake_case , buff=0.0 )
            else:
                cpu_target.next_to(cpu_targs[i - 1] , direction=__snake_case , buff=0.0 )
            self.add(__snake_case )
            cpu_targs.append(__snake_case )
        # --- loaded-checkpoint block and legend ------------------------------
        UpperCAmelCase : int = [mem.copy() for i in range(6 )]
        UpperCAmelCase : int = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
        UpperCAmelCase : Any = Text('''Loaded Checkpoint''' , font_size=24 )
        UpperCAmelCase : Union[str, Any] = Group(__snake_case , __snake_case ).arrange(__snake_case , aligned_edge=__snake_case , buff=0.4 )
        checkpoint.move_to([3, 0.5, 0] )
        UpperCAmelCase : Optional[int] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        UpperCAmelCase : str = MarkupText(
            F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__snake_case , __snake_case )
        UpperCAmelCase : Tuple = MarkupText(
            F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
        blue_text.next_to(__snake_case , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        UpperCAmelCase : List[Any] = MarkupText(
            F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__snake_case ) , Write(__snake_case ) )
        self.play(Write(__snake_case , run_time=1 ) , Create(__snake_case , run_time=1 ) )
        # --- animate each shard growing and moving into the CPU blocks -------
        UpperCAmelCase : Tuple = []
        UpperCAmelCase : int = []
        for i, rect in enumerate(__snake_case ):
            UpperCAmelCase : Any = fill.copy().set_fill(__snake_case , opacity=0.7 )
            target.move_to(__snake_case )
            first_animations.append(GrowFromCenter(__snake_case , run_time=1 ) )
            UpperCAmelCase : List[str] = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5] )
            second_animations.append(MoveToTarget(__snake_case , run_time=1.5 ) )
        self.play(*__snake_case )
        self.play(*__snake_case )
        self.wait()
| 23 | 0 |
def odd_even_transposition(arr: list) -> list:
    """Sort *arr* in place (ascending) with odd-even transposition sort and return it.

    Alternates between comparing odd/even-indexed neighbour pairs; ``n`` passes
    suffice to sort ``n`` elements.
    """
    arr_size = len(arr)
    # BUG FIX: previously iterated range(arr) over the list itself (TypeError)
    # and assigned the swapped pair to a throwaway name, so nothing was sorted.
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    # Demo: sort a reversed range.  The f-string formats {arr} before the sort
    # call runs, so "Original" shows the unsorted list.
    arr = list(range(10, 0, -1))
    print(f'Original: {arr}. Sorted: {odd_even_transposition(arr)}')
| 178 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters used to build a GLPN image processor in the tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        # BUG FIX: the constructor previously duplicated one parameter name
        # (SyntaxError) and never stored the values on self.
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        """Kwargs for constructing a ``GLPNImageProcessor`` under test."""
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ``GLPNImageProcessor`` over PIL, numpy and torch inputs."""

    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 178 | 1 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class __lowerCAmelCase(Pipeline):
    """Depth-estimation pipeline: maps an input image to a per-pixel depth map
    (returned both as the raw predicted tensor and as a grayscale PIL image)."""

    def __init__(self, *args, **kwargs):
        # BUG FIX: *args/**kwargs previously shared one duplicated name (SyntaxError).
        super().__init__(*args, **kwargs)
        requires_backends(self, 'vision')
        # Restrict to models registered for depth estimation.
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        """Predict the depth map for one image (path, URL, or PIL image)."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # No configurable parameters: empty preprocess/forward/postprocess kwargs.
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        # Remember the original (width, height) so postprocess can resize back.
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        # Upsample to the original resolution; image.size is (w, h) so reverse it.
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode='bicubic', align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype('uint8')
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict['predicted_depth'] = predicted_depth
        output_dict['depth'] = depth
        return output_dict
| 45 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# BUG FIX: all four module constants were previously assigned to the same
# clobbered name, while the tokenizer class below references VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP and PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES.

# File names expected inside an ALBERT checkpoint directory.
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
        'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
        'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
        'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
        'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
        'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
        'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
        'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'albert-base-v1': 512,
    'albert-large-v1': 512,
    'albert-xlarge-v1': 512,
    'albert-xxlarge-v1': 512,
    'albert-base-v2': 512,
    'albert-large-v2': 512,
    'albert-xlarge-v2': 512,
    'albert-xxlarge-v2': 512,
}

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = '▁'
class lowerCAmelCase_(PreTrainedTokenizer):
    """
    ALBERT tokenizer, backed by a SentencePiece model (``spiece.model``).

    Args:
        vocab_file: Path to the SentencePiece model file.
        do_lower_case: Lowercase the input during preprocessing.
        remove_space: Collapse runs of whitespace before tokenizing.
        keep_accents: If False, strip accents via NFKD normalization.
        sp_model_kwargs: Extra kwargs forwarded to ``SentencePieceProcessor``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable: drop it and reload in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility with tokenizers pickled before sp_model_kwargs existed
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Apply whitespace/quote/accent/case normalization before SentencePiece."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text):
        """Tokenize a string, re-splitting pieces like '9,' so digits stay separate."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Decode sub-tokens with SentencePiece, keeping special tokens verbatim."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy (or serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 37 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module-level logger; the config class below logs backbone-initialization info.
logger = logging.get_logger(__name__)
# Hub location of the pretrained config for each Table Transformer checkpoint.
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class _snake_case(PretrainedConfig):
    """
    Configuration for the Table Transformer model: a DETR-style detection
    transformer over a CNN backbone (timm or a HF backbone config).
    """

    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2_048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        # `backbone_config` (HF backbone) and `use_timm_backbone` are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self):
        # Alias used by the base config machinery (see `attribute_map`).
        return self.encoder_attention_heads

    @property
    def hidden_size(self):
        # Alias used by the base config machinery (see `attribute_map`).
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for Table Transformer (image inputs + pixel mask)."""

    # Minimum torch version supporting the ops needed for export.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        # Dynamic axes: batch is always dynamic; image dims are dynamic for pixel_values.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self):
        # Absolute tolerance when validating exported outputs against PyTorch.
        return 1e-5

    @property
    def default_onnx_opset(self):
        return 12
| 41 |
from __future__ import annotations
# Adjacency list of the undirected example graph used by the demo below.
graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
class Graph:
    """Breadth-first search over an adjacency-list graph, with path reconstruction."""

    def __init__(self, graph, source_vertex):
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex

    def breath_first_search(self):
        """Run BFS from the source vertex, filling ``self.parent`` for every reachable vertex."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex):
        """Return the BFS path ``source->...->target``; raises ValueError if unreachable."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    # "Foo" is not in the graph, so this deliberately raises ValueError.
    print(g.shortest_path("Foo"))
| 41 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make CUDA/cuDNN kernels deterministic so the pipeline tests below are reproducible.
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    # Fast (CPU, tiny dummy model) tests for DanceDiffusionPipeline.
    # NOTE(review): every class attribute is bound to `_UpperCamelCase` and every
    # method to `__A`, so each definition silently overwrites the previous one and
    # several locals (`unet`, `scheduler`, `generator`, `pipe`, ...) are referenced
    # without ever being bound. These look like mangled identifiers — confirm
    # against the upstream diffusers test module before relying on this class.
    _UpperCamelCase : str = DanceDiffusionPipeline
    _UpperCamelCase : int = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    _UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    _UpperCamelCase : List[Any] = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    _UpperCamelCase : Optional[Any] = False
    _UpperCamelCase : Optional[int] = False

    def __A ( self ):
        # Builds the dummy pipeline components: a tiny 1D UNet plus an IPNDM scheduler.
        torch.manual_seed(0 )
        _lowerCAmelCase : Any = UNetaDModel(
            block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=a__ , use_timestep_embedding=a__ , time_embedding_type="""fourier""" , mid_block_type="""UNetMidBlock1D""" , down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D""") , up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip""") , )
        _lowerCAmelCase : List[str] = IPNDMScheduler()
        _lowerCAmelCase : Dict = {
            """unet""": unet,
            """scheduler""": scheduler,
        }
        return components

    def __A ( self , a__ , a__=0 ):
        # NOTE(review): duplicate parameter name `a__` is a SyntaxError; the original
        # presumably read (self, device, seed=0) — confirm against upstream.
        if str(a__ ).startswith("""mps""" ):
            _lowerCAmelCase : List[str] = torch.manual_seed(a__ )
        else:
            _lowerCAmelCase : Dict = torch.Generator(device=a__ ).manual_seed(a__ )
        _lowerCAmelCase : Optional[Any] = {
            """batch_size""": 1,
            """generator""": generator,
            """num_inference_steps""": 4,
        }
        return inputs

    def __A ( self ):
        # End-to-end fast test: run the dummy pipeline and check the audio slice values.
        _lowerCAmelCase : Optional[int] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        _lowerCAmelCase : Dict = self.get_dummy_components()
        _lowerCAmelCase : Union[str, Any] = DanceDiffusionPipeline(**a__ )
        _lowerCAmelCase : Tuple = pipe.to(a__ )
        pipe.set_progress_bar_config(disable=a__ )
        _lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(a__ )
        _lowerCAmelCase : Optional[Any] = pipe(**a__ )
        _lowerCAmelCase : int = output.audios
        _lowerCAmelCase : List[Any] = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        _lowerCAmelCase : Dict = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2

    @skip_mps
    def __A ( self ):
        return super().test_save_load_local()

    @skip_mps
    def __A ( self ):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )

    @skip_mps
    def __A ( self ):
        return super().test_save_load_optional_components()

    @skip_mps
    def __A ( self ):
        return super().test_attention_slicing_forward_pass()

    def __A ( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
    # Slow GPU integration tests against the pretrained "harmonai/maestro-150k" checkpoint.
    # NOTE(review): every method is named `__A` (later defs overwrite earlier ones) and
    # locals such as `pipe`, `generator`, `audio` are referenced without being bound —
    # mangled identifiers; verify against the upstream test module.

    def __A ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __A ( self ):
        # fp32 run: 100 inference steps, fixed seed, compare a slice of the audio output.
        _lowerCAmelCase : Union[str, Any] = torch_device
        _lowerCAmelCase : List[str] = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" )
        _lowerCAmelCase : List[Any] = pipe.to(a__ )
        pipe.set_progress_bar_config(disable=a__ )
        _lowerCAmelCase : Optional[Any] = torch.manual_seed(0 )
        _lowerCAmelCase : Union[str, Any] = pipe(generator=a__ , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
        _lowerCAmelCase : Dict = output.audios
        _lowerCAmelCase : Union[str, Any] = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        _lowerCAmelCase : int = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2

    def __A ( self ):
        # fp16 run: same check with torch_dtype=torch.float16 ("floataa" is a mangled name).
        _lowerCAmelCase : List[str] = torch_device
        _lowerCAmelCase : Tuple = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" , torch_dtype=torch.floataa )
        _lowerCAmelCase : str = pipe.to(a__ )
        pipe.set_progress_bar_config(disable=a__ )
        _lowerCAmelCase : Dict = torch.manual_seed(0 )
        _lowerCAmelCase : Optional[int] = pipe(generator=a__ , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
        _lowerCAmelCase : Any = output.audios
        _lowerCAmelCase : Tuple = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        _lowerCAmelCase : Tuple = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 44 | """simple docstring"""
def compute_ap(l):  # noqa: E741
    """
    Print every articulation point of the undirected graph `l`
    (adjacency list keyed by vertices 0..n-1).
    """
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        # Depth-first search returning the updated count of tree edges leaving `root`.
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at  # low-link value, initialised to the vertex's own index

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # The DFS root is an articulation point iff it has more than one tree edge.
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
| 44 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class UpperCAmelCase__(unittest.TestCase):
    """Integration tests for TatoebaConverter (requires a local Tatoeba checkout)."""

    @cached_property
    def resolver(self):
        # Build a converter writing into a throwaway temp directory.
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        # Smoke test: converting one language pair should not raise.
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        # dry_run=True returns the card content and metadata without writing files.
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 243 |
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level logger for the CANINE tokenizer module.
logger = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2_048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1_114_112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class UpperCAmelCase__(PreTrainedTokenizer):
    r"""
    CANINE tokenizer: splits text into single characters and maps each character
    to its Unicode code point (special pseudo-characters use Private Use codepoints).

    Args:
        model_max_length (`int`, *optional*, defaults to 2048):
            The maximum sentence length the model accepts.
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2_048,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def get_vocab(self):
        vocab = {chr(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into a sequence of single characters."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """A character's id is simply its Unicode code point."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """Map a code point back to its character; special codepoints get readable names."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        # CANINE has no vocabulary file to persist.
        return ()
| 243 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
# Module-level logger (standard library `logging`).
logger = logging.getLogger(__name__)
def parse_args():
    """Parse the command-line options for the TFRecord sharding script."""
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    """Return a datasets-style map function that tokenizes the "text" column."""

    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    """Serialize each tokenized sample into a `tf.train.Example` protobuf byte string."""
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    """Load, tokenize, group and shard the dataset into TFRecord files."""
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
    main(args)
| 60 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def _snake_case ( _snake_case : list[list[float]] ):
lowerCAmelCase : str = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(_snake_case ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
lowerCAmelCase : int = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creates a copy of the matrix with swapped positions of the elements
lowerCAmelCase : Optional[int] = [[0.0, 0.0], [0.0, 0.0]]
lowerCAmelCase, lowerCAmelCase : List[Any] = matrix[1][1], matrix[0][0]
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(_snake_case ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(_snake_case ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
lowerCAmelCase : int = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creating cofactor matrix
lowerCAmelCase : Dict = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
lowerCAmelCase : List[str] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
lowerCAmelCase : Dict = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
lowerCAmelCase : str = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
lowerCAmelCase : Any = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
lowerCAmelCase : Any = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
lowerCAmelCase : Optional[int] = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
lowerCAmelCase : Optional[int] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
lowerCAmelCase : Dict = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
lowerCAmelCase : List[Any] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
lowerCAmelCase : str = array(_snake_case )
for i in range(3 ):
for j in range(3 ):
lowerCAmelCase : Optional[Any] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
lowerCAmelCase : Tuple = array(_snake_case )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(_snake_case )
# Calculate the inverse of the matrix
return [[float(d(_snake_case ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' )
| 60 | 1 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def a__ ( __SCREAMING_SNAKE_CASE = 8 ) -> str:
    """Generate a cryptographically secure random password.

    :param __SCREAMING_SNAKE_CASE: desired password length (default 8).
    :return: string of random letters, digits and punctuation characters.
    """
    # Pool of candidate characters; `secrets` (not `random`) keeps the
    # result suitable for security-sensitive use.
    pool = ascii_letters + digits + punctuation
    # BUG FIX: the original passed the *length* to secrets.choice(); choose
    # from the character pool instead, once per requested position.
    return "".join(secrets.choice(pool) for _ in range(__SCREAMING_SNAKE_CASE))
def a__ ( chars_incl , i ) -> str:
    """Build a password of total length *i* guaranteed to contain every
    character of *chars_incl*, padded with random letters, digits and
    punctuation, then shuffled.

    NOTE(review): the original definition repeated one parameter name (a
    SyntaxError) and lost the pool arguments; names and arguments are
    restored from the upstream TheAlgorithms implementation.
    """
    # Remaining length to fill after the mandatory characters.
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # `random` is the sibling "pick n chars from pool" helper defined below
    # in this module (renamed by obfuscation in this file — TODO confirm).
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    # Shuffle so the mandatory characters are not clustered at the front.
    chars_list = list(chars)
    shuffle(chars_list)
    return "".join(chars_list)
# random is a generalised function for letters, characters and numbers
def a__ ( chars , i ) -> str:
    """Return *i* characters drawn (with replacement) from the pool *chars*.

    NOTE(review): the original definition repeated one parameter name,
    which is a SyntaxError; distinct names restored.
    """
    return "".join(secrets.choice(chars) for _ in range(i))
def a__ ( pool: str , i: int ) -> None:
    """Placeholder for one of the planned per-category helpers
    (random_number / random_letters / random_characters); never implemented
    upstream — callers use the generic `random` helper instead.

    NOTE(review): duplicate parameter names (SyntaxError) and an unimported
    `List[Any]` return annotation fixed.
    """
    pass  # Put your code here...
def a__ ( pool: str , i: int ) -> None:
    """Placeholder for one of the planned per-category helpers
    (random_number / random_letters / random_characters); never implemented
    upstream — callers use the generic `random` helper instead.

    NOTE(review): duplicate parameter names (SyntaxError) fixed; the `int`
    return annotation was wrong for a `pass` body.
    """
    pass  # Put your code here...
def a__ ( pool: str , i: int ) -> None:
    """Placeholder for one of the planned per-category helpers
    (random_number / random_letters / random_characters); never implemented
    upstream — callers use the generic `random` helper instead.

    NOTE(review): duplicate parameter names (SyntaxError) and an unimported
    `Optional[int]` return annotation fixed.
    """
    pass  # Put your code here...
def a__ ( password: str , min_length: int = 8 ) -> bool:
    """Check that *password* is at least *min_length* characters long and
    contains an uppercase letter, a lowercase letter, a digit and a
    punctuation character.

    NOTE(review): the original definition repeated one parameter name
    (SyntaxError); names restored from the upstream implementation.
    """
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    # All four character classes must be present.
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def a__ ( ) -> None:
    """Interactive driver: read a length and mandatory characters from
    stdin and print two candidate passwords.

    NOTE(review): this file's identifiers were machine-mangled — the two
    inputs are both bound to `__lowerCAmelCase` while the calls below read
    `__SCREAMING_SNAKE_CASE`, and `password_generator` /
    `alternative_password_generator` are not defined under those names here
    (the functions above are all named `a__`). As written this raises
    NameError; compare with the upstream module before running.
    """
    __lowerCAmelCase: int = int(input("Please indicate the max length of your password: " ).strip() )
    __lowerCAmelCase: str = input(
        "Please indicate the characters that must be in your password: " ).strip()
    print("Password generated:" , password_generator(__SCREAMING_SNAKE_CASE ) )
    print(
        "Alternative Password generated:" , alternative_password_generator(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , )
    print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined under that name in this file (the
    # driver above was renamed to `a__` by obfuscation) — this call raises
    # NameError as written; verify against the upstream module.
    main()
| 108 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def a__ ( inductance: float , frequency: float , reactance: float ) -> dict[str, float]:
    """Solve the inductive-reactance relation X_L = 2*pi*f*L for whichever
    quantity is passed as zero.

    Exactly one argument must be 0; that quantity is computed from the other
    two and returned in a single-entry dict keyed by its name.

    :raises ValueError: if not exactly one argument is zero, or any value
        is negative.

    NOTE(review): the original definition repeated one parameter name three
    times (a SyntaxError); the names the body already used are restored as
    the parameters.
    """
    if (inductance, frequency, reactance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if inductance < 0:
        raise ValueError("Inductance cannot be negative" )
    if frequency < 0:
        raise ValueError("Frequency cannot be negative" )
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative" )
    if inductance == 0:
        # L = X_L / (2*pi*f)
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        # f = X_L / (2*pi*L)
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        # X_L = 2*pi*f*L
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 108 | 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
# Make the repo's test-utility modules importable so that
# `test_module.custom_feature_extraction` below resolves.
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))

from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402

# Directory holding the small fixture checkpoints used by these tests.
__A : str = get_tests_dir('''fixtures''')
class __A ( unittest.TestCase ):
    """Offline/fallback behaviour of feature-extractor loading.

    NOTE(review): this file's identifiers were machine-mangled — the mock
    setup below binds every value to the same `lowerCAmelCase` name and then
    patches with an undefined `UpperCAmelCase_`, so the tests raise
    NameError as written; compare with the upstream transformers test.
    """

    def lowercase__ ( self : Any ):
        """Loading should fall back to the local cache when the Hub 500s."""
        # A mock response for an HTTP head request to emulate server down
        lowerCAmelCase : Tuple = mock.Mock()
        lowerCAmelCase : Optional[int] = 500
        lowerCAmelCase : Optional[Any] = {}
        lowerCAmelCase : int = HTTPError
        lowerCAmelCase : Optional[int] = {}
        # Download this model to make sure it's in the cache.
        lowerCAmelCase : Dict = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request' , return_value=UpperCAmelCase_ ) as mock_head:
            lowerCAmelCase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
            # This check we did call the fake head request
            mock_head.assert_called()

    def lowercase__ ( self : int ):
        """Loading directly from a resolved config URL (deprecated API)."""
        # This test is for deprecated behavior and can be removed in v5
        lowerCAmelCase : Any = WavaVecaFeatureExtractor.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class __A ( unittest.TestCase ):
    """Round-trip push-to-hub tests against the staging Hub.

    NOTE(review): identifiers in this file were machine-mangled — locals are
    all bound to `lowerCAmelCase` while assertions read an undefined
    `UpperCAmelCase_`, and all test methods share the name `lowercase__`
    (later defs shadow earlier ones). Compare with the upstream
    transformers test before trusting behaviour described here.
    """

    @classmethod
    def lowercase__ ( cls : List[Any] ):
        """setUpClass: persist the staging auth token for the class."""
        lowerCAmelCase : List[Any] = TOKEN
        HfFolder.save_token(UpperCAmelCase_ )

    @classmethod
    def lowercase__ ( cls : str ):
        """tearDownClass: best-effort cleanup of the repos created below."""
        try:
            delete_repo(token=cls._token , repo_id='test-feature-extractor' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor' )
        except HTTPError:
            pass

    def lowercase__ ( self : Any ):
        """Push to a user repo via push_to_hub and via save_pretrained."""
        lowerCAmelCase : List[str] = WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase_ )
        feature_extractor.push_to_hub('test-feature-extractor' , use_auth_token=self._token )
        lowerCAmelCase : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
        # Re-downloaded extractor must match the pushed one attribute-for-attribute.
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='test-feature-extractor' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                UpperCAmelCase_ , repo_id='test-feature-extractor' , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token )
            lowerCAmelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
            for k, v in feature_extractor.__dict__.items():
                self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )

    def lowercase__ ( self : int ):
        """Same round-trip, but into an organization namespace."""
        lowerCAmelCase : Any = WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase_ )
        feature_extractor.push_to_hub('valid_org/test-feature-extractor' , use_auth_token=self._token )
        lowerCAmelCase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='valid_org/test-feature-extractor' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                UpperCAmelCase_ , repo_id='valid_org/test-feature-extractor-org' , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token )
            lowerCAmelCase : List[str] = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
            for k, v in feature_extractor.__dict__.items():
                self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )

    def lowercase__ ( self : Tuple ):
        """Push a custom (dynamic-module) extractor and reload it with
        trust_remote_code."""
        CustomFeatureExtractor.register_for_auto_class()
        lowerCAmelCase : Union[str, Any] = CustomFeatureExtractor.from_pretrained(UpperCAmelCase_ )
        feature_extractor.push_to_hub('test-dynamic-feature-extractor' , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map , {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} , )
        lowerCAmelCase : Optional[Any] = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor" , trust_remote_code=UpperCAmelCase_ )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ , 'CustomFeatureExtractor' )
| 138 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger for this conversion script.
__A : Optional[Any] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Union[str, Any]:
    """Derive a DPT-hybrid config (and an expected tensor shape for the
    smoke test) from the checkpoint URL.

    :param _UpperCAmelCase: URL of the original DPT checkpoint.
    :return: ``(config, expected_shape)``.

    NOTE(review): reconstructed — the original body assigned every value to
    one mangled local and returned undefined names. Attribute names follow
    the upstream ``convert_dpt_hybrid_to_pytorch.py``; confirm them against
    transformers' ``DPTConfig`` before relying on this.
    """
    checkpoint_url = _UpperCAmelCase
    config = DPTConfig(embedding_type='hybrid' )
    # Default smoke-test shape; the branches below override it where needed.
    expected_shape = (1, 384, 384)
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)
    # BUG FIX: the original tested `if "nyu" or "midas" in checkpoint_url`,
    # which is always true; each substring must be tested against the URL.
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = 'project'
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        # ADE20K segmentation head: fetch the id->label mapping.
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset' ) ), 'r' ) )
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> None:
    """Drop the timm classification-head weights from *_UpperCAmelCase*
    (a checkpoint state dict); the converted DPT model has no such head.

    Mutates the dict in place and returns None (annotation fixed from
    the original's incorrect ``-> int``).
    """
    state_dict = _UpperCAmelCase
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        # BUG FIX: the original popped an undefined name; pop each ignored
        # key, tolerating its absence.
        state_dict.pop(k, None)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> str:
    """Translate one timm/DPT checkpoint key into the HF DPT naming scheme.

    Applies an ordered chain of substring rewrites; order matters (e.g.
    'scratch' must already be rewritten to 'neck' before the refinenet
    index is parsed).

    NOTE(review): the original body assigned every rewrite to a throwaway
    mangled local while reading an undefined ``name``; it now threads its
    own parameter through the chain as in the upstream conversion script.
    """
    name = _UpperCAmelCase
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model', 'dpt.encoder' )
    if "pretrained.model" in name:
        name = name.replace('pretrained.model', 'dpt.embeddings' )
    if "patch_embed" in name:
        name = name.replace('patch_embed', '' )
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'position_embeddings' )
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense' )
    if "proj" in name and "project" not in name:
        name = name.replace('proj', 'projection' )
    if "blocks" in name:
        name = name.replace('blocks', 'layer' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense' )
    if "norm1" in name and "backbone" not in name:
        name = name.replace('norm1', 'layernorm_before' )
    if "norm2" in name and "backbone" not in name:
        name = name.replace('norm2', 'layernorm_after' )
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv', 'head' )
    if "scratch" in name:
        name = name.replace('scratch', 'neck' )
    if "layer1_rn" in name:
        name = name.replace('layer1_rn', 'convs.0' )
    if "layer2_rn" in name:
        name = name.replace('layer2_rn', 'convs.1' )
    if "layer3_rn" in name:
        name = name.replace('layer3_rn', 'convs.2' )
    if "layer4_rn" in name:
        name = name.replace('layer4_rn', 'convs.3' )
    if "refinenet" in name:
        # 'scratch' was already rewritten to 'neck', so the stage digit
        # sits right after 'neck.refinenet'.
        layer_idx = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4 )}" )
    if "out_conv" in name:
        name = name.replace('out_conv', 'projection' )
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1', 'residual_layer1' )
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2', 'residual_layer2' )
    if "conv1" in name:
        name = name.replace('conv1', 'convolution1' )
    if "conv2" in name:
        name = name.replace('conv2', 'convolution2' )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0', 'neck.reassemble_stage.readout_projects.0.0' )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0', 'neck.reassemble_stage.readout_projects.1.0' )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0', 'neck.reassemble_stage.readout_projects.2.0' )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0', 'neck.reassemble_stage.readout_projects.3.0' )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3', 'neck.reassemble_stage.layers.0.projection' )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4', 'neck.reassemble_stage.layers.0.resize' )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3', 'neck.reassemble_stage.layers.1.projection' )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4', 'neck.reassemble_stage.layers.1.resize' )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3', 'neck.reassemble_stage.layers.2.projection' )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3', 'neck.reassemble_stage.layers.3.projection' )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4', 'neck.reassemble_stage.layers.3.resize' )
    if "pretrained" in name:
        name = name.replace('pretrained', 'dpt' )
    if "bn" in name:
        name = name.replace('bn', 'batch_norm' )
    if "head" in name:
        name = name.replace('head', 'head.head' )
    if "encoder.norm" in name:
        name = name.replace('encoder.norm', 'layernorm' )
    if "auxlayer" in name:
        name = name.replace('auxlayer', 'auxiliary_head.head' )
    if "backbone" in name:
        name = name.replace('backbone', 'backbone.bit.encoder' )
    if ".." in name:
        name = name.replace('..', '.' )
    if "stem.conv" in name:
        name = name.replace('stem.conv', 'bit.embedder.convolution' )
    if "blocks" in name:
        name = name.replace('blocks', 'layers' )
    if "convolution" in name and "backbone" in name:
        name = name.replace('convolution', 'conv' )
    if "layer" in name and "backbone" in name:
        name = name.replace('layer', 'layers' )
    if "backbone.bit.encoder.bit" in name:
        name = name.replace('backbone.bit.encoder.bit', 'backbone.bit' )
    if "embedder.conv" in name:
        name = name.replace('embedder.conv', 'embedder.convolution' )
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace('backbone.bit.encoder.stem.norm', 'backbone.bit.embedder.norm' )
    return name
def SCREAMING_SNAKE_CASE__ ( state_dict, config ) -> None:
    """Split each layer's fused timm qkv projection into separate HF
    query/key/value entries (timm stores them as one matrix + one bias).

    Mutates *state_dict* in place.

    NOTE(review): the original definition repeated one parameter name (a
    SyntaxError) and dropped the destination keys; both restored from the
    upstream conversion script — confirm key names against DPT's modeling
    code.
    """
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE__ ( ) -> "Image.Image":
    """Download the standard COCO cats test image used to smoke-test the
    converted model.

    Requires network access; returns a PIL image.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # BUG FIX: the original referenced undefined names for both the URL and
    # the decoded image; stream the response body straight into PIL.
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Optional[int]:
    '''End-to-end conversion driver: load the original checkpoint, remap its
    keys, load the HF model, run a smoke-test forward pass, then optionally
    save and/or push the result.

    NOTE(review): this file's identifiers were machine-mangled — the five
    parameters share one name (a SyntaxError as written), every local is
    bound to `lowerCAmelCase`, and the body reads lost names such as
    `state_dict`, `checkpoint_url`, `model`, `image_processor`,
    `show_prediction`, `prediction`, `pytorch_dump_folder_path` and
    `push_to_hub`. Compare with the upstream
    `convert_dpt_hybrid_to_pytorch.py` before running.
    '''
    lowerCAmelCase , lowerCAmelCase : List[str] = get_dpt_config(_UpperCAmelCase )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    lowerCAmelCase : str = torch.load(_UpperCAmelCase, map_location='cpu' )
    # remove certain keys
    remove_ignore_keys_(_UpperCAmelCase )
    # rename keys
    for key in state_dict.copy().keys():
        lowerCAmelCase : str = state_dict.pop(_UpperCAmelCase )
        lowerCAmelCase : int = val
    # read in qkv matrices
    read_in_q_k_v(_UpperCAmelCase, _UpperCAmelCase )
    # load HuggingFace model
    lowerCAmelCase : int = DPTForSemanticSegmentation(_UpperCAmelCase ) if 'ade' in checkpoint_url else DPTForDepthEstimation(_UpperCAmelCase )
    model.load_state_dict(_UpperCAmelCase )
    model.eval()
    # Check outputs on an image
    lowerCAmelCase : str = 480 if 'ade' in checkpoint_url else 384
    lowerCAmelCase : Dict = DPTImageProcessor(size=_UpperCAmelCase )
    lowerCAmelCase : Union[str, Any] = prepare_img()
    lowerCAmelCase : Union[str, Any] = image_processor(_UpperCAmelCase, return_tensors='pt' )
    # forward pass
    lowerCAmelCase : Optional[Any] = model(**_UpperCAmelCase ).logits if 'ade' in checkpoint_url else model(**_UpperCAmelCase ).predicted_depth
    if show_prediction:
        # Upsample to the input resolution and display for visual inspection.
        lowerCAmelCase : str = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ), size=(image.size[1], image.size[0]), mode='bicubic', align_corners=_UpperCAmelCase, )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255 ).show()
    if pytorch_dump_folder_path is not None:
        Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
        print(f"Saving model to {pytorch_dump_folder_path}" )
        model.save_pretrained(_UpperCAmelCase )
        print(f"Saving image processor to {pytorch_dump_folder_path}" )
        image_processor.save_pretrained(_UpperCAmelCase )
    if push_to_hub:
        model.push_to_hub('ybelkada/dpt-hybrid-midas' )
        image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
    # NOTE(review): the parser is bound to `__A` but configured and read
    # through the lost names `parser` / `args` — this raises NameError as
    # written; compare with the upstream conversion script.
    __A : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
        type=str,
        help='''URL of the original DPT checkpoint you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=str,
        required=False,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
    )
    parser.add_argument(
        '''--model_name''',
        default='''dpt-large''',
        type=str,
        help='''Name of the model, in case you\'re pushing to the hub.''',
    )
    parser.add_argument(
        '''--show_prediction''',
        action='''store_true''',
    )
    __A : Dict = parser.parse_args()
    # `convert_dpt_checkpoint` is the mangled `SCREAMING_SNAKE_CASE__`
    # driver above — verify the name against the upstream script.
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
| 138 | 1 |
'''simple docstring'''
def UpperCAmelCase ( lowerCamelCase_ :int ) -> bool:
    """Return True iff *lowerCamelCase_* is a perfect cube, i.e. k**3 for
    some non-negative integer k.

    The float cube root is rounded to the nearest integer before checking,
    so exact cubes such as 27 are not rejected by floating-point error
    (27 ** (1/3) is not exactly 3.0). The original body also referenced
    undefined names (`n`, `val`); it now uses its own parameter.
    """
    if lowerCamelCase_ < 0:
        # Negative inputs cannot be k**3 for non-negative k (and fractional
        # powers of negatives are undefined over the reals).
        return False
    root = round(lowerCamelCase_ ** (1 / 3) )
    return root * root * root == lowerCamelCase_
if __name__ == "__main__":
    # NOTE(review): `perfect_cube` is not defined under that name in this
    # file (the function above is named `UpperCAmelCase`), so these demo
    # calls raise NameError as written.
    print(perfect_cube(27))
    print(perfect_cube(4))
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
    """Fast (CPU, tiny-model) tests for StableDiffusionInpaintPipeline.

    NOTE(review): identifiers in this file were machine-mangled — the mixin
    bases are all named `lowercase__`, the class attributes all `lowercase`
    (later assignments shadow earlier ones), and the second method repeats
    the parameter name `_UpperCamelCase` (a SyntaxError as written).
    Compare with the upstream diffusers test before trusting behaviour.
    """

    # Pipeline under test plus the parameter sets consumed by the shared
    # pipeline-test mixins.
    lowercase : Dict = StableDiffusionInpaintPipeline
    lowercase : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    lowercase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    lowercase : Dict = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    lowercase : Optional[int] = frozenset([] )

    def a__ ( self :Any ):
        """Build a tiny UNet/scheduler/VAE/CLIP component set for fast tests."""
        torch.manual_seed(0 )
        snake_case_ : Optional[int] = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=9 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=3_2 ,attention_head_dim=(2, 4) ,use_linear_projection=_UpperCamelCase ,)
        snake_case_ : Tuple = PNDMScheduler(skip_prk_steps=_UpperCamelCase )
        torch.manual_seed(0 )
        snake_case_ : List[str] = AutoencoderKL(
            block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=1_2_8 ,)
        torch.manual_seed(0 )
        snake_case_ : Optional[int] = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act="""gelu""" ,projection_dim=5_1_2 ,)
        snake_case_ : Tuple = CLIPTextModel(_UpperCamelCase )
        snake_case_ : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        snake_case_ : str = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    def a__ ( self :str ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :Union[str, Any]=0 ):
        """Build deterministic (seeded) pipeline inputs: init image, mask,
        prompt and generator."""
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        snake_case_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
        snake_case_ : int = image.cpu().permute(0 ,2 ,3 ,1 )[0]
        snake_case_ : List[str] = Image.fromarray(np.uinta(_UpperCamelCase ) ).convert("""RGB""" ).resize((6_4, 6_4) )
        snake_case_ : Optional[Any] = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((6_4, 6_4) )
        if str(_UpperCamelCase ).startswith("""mps""" ):
            snake_case_ : Optional[Any] = torch.manual_seed(_UpperCamelCase )
        else:
            snake_case_ : Optional[int] = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
        snake_case_ : int = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": init_image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs

    def a__ ( self :Any ):
        """Two-step CPU run must reproduce the pinned output slice."""
        snake_case_ : Union[str, Any] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        snake_case_ : Optional[Any] = self.get_dummy_components()
        snake_case_ : Dict = StableDiffusionInpaintPipeline(**_UpperCamelCase )
        snake_case_ : List[str] = sd_pipe.to(_UpperCamelCase )
        sd_pipe.set_progress_bar_config(disable=_UpperCamelCase )
        snake_case_ : Union[str, Any] = self.get_dummy_inputs(_UpperCamelCase )
        snake_case_ : Tuple = sd_pipe(**_UpperCamelCase ).images
        snake_case_ : List[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        snake_case_ : Dict = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def a__ ( self :Any ):
        """Batched single-image inference must match unbatched output."""
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
    """Slow GPU integration tests for StableDiffusionInpaintPipeline
    against the stabilityai/stable-diffusion-2-inpainting checkpoint.

    NOTE(review): identifiers in this file were machine-mangled — every
    test method is named `a__` (later defs shadow earlier ones) and locals
    are all bound to `snake_case_`; several boolean/flag arguments are
    passed the undefined name `_UpperCamelCase`. Compare with the upstream
    diffusers test before trusting behaviour described here.
    """

    def a__ ( self :List[Any] ):
        """Per-test teardown (mangled `tearDown`): free GPU memory."""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def a__ ( self :Tuple ):
        """fp32 inpainting output must match the stored reference image."""
        snake_case_ : Union[str, Any] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        snake_case_ : List[str] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        snake_case_ : Dict = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
            """/yellow_cat_sitting_on_a_park_bench.npy""" )
        snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting"""
        snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(_UpperCamelCase ,safety_checker=_UpperCamelCase )
        pipe.to(_UpperCamelCase )
        pipe.set_progress_bar_config(disable=_UpperCamelCase )
        pipe.enable_attention_slicing()
        snake_case_ : Optional[Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
        snake_case_ : List[str] = torch.manual_seed(0 )
        snake_case_ : Dict = pipe(
            prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,output_type="""np""" ,)
        snake_case_ : Union[str, Any] = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert np.abs(expected_image - image ).max() < 9E-3

    def a__ ( self :Tuple ):
        """fp16 inpainting output must match its (looser) fp16 reference."""
        snake_case_ : int = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        snake_case_ : Dict = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        snake_case_ : List[str] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
            """/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
        snake_case_ : Optional[int] = """stabilityai/stable-diffusion-2-inpainting"""
        snake_case_ : List[str] = StableDiffusionInpaintPipeline.from_pretrained(
            _UpperCamelCase ,torch_dtype=torch.floataa ,safety_checker=_UpperCamelCase ,)
        pipe.to(_UpperCamelCase )
        pipe.set_progress_bar_config(disable=_UpperCamelCase )
        pipe.enable_attention_slicing()
        snake_case_ : Optional[Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
        snake_case_ : List[Any] = torch.manual_seed(0 )
        snake_case_ : Any = pipe(
            prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,output_type="""np""" ,)
        snake_case_ : List[str] = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert np.abs(expected_image - image ).max() < 5E-1

    def a__ ( self :Union[str, Any] ):
        """With attention slicing + sequential CPU offload, peak VRAM must
        stay under ~2.65 GB."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        snake_case_ : Optional[int] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        snake_case_ : int = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        snake_case_ : int = """stabilityai/stable-diffusion-2-inpainting"""
        snake_case_ : Dict = PNDMScheduler.from_pretrained(_UpperCamelCase ,subfolder="""scheduler""" )
        snake_case_ : List[Any] = StableDiffusionInpaintPipeline.from_pretrained(
            _UpperCamelCase ,safety_checker=_UpperCamelCase ,scheduler=_UpperCamelCase ,torch_dtype=torch.floataa ,)
        pipe.to(_UpperCamelCase )
        pipe.set_progress_bar_config(disable=_UpperCamelCase )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        snake_case_ : List[Any] = """Face of a yellow cat, high resolution, sitting on a park bench"""
        snake_case_ : Optional[int] = torch.manual_seed(0 )
        snake_case_ : Tuple = pipe(
            prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,num_inference_steps=2 ,output_type="""np""" ,)
        snake_case_ : Any = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 1_0**9
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# NOTE(review): both module constants below are bound to `_a` (mangled), so
# the archive map overwrites the logger; upstream uses `logger` and
# `DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP`.
_a : List[Any] = logging.get_logger(__name__)

# Map of pretrained model id -> hosted config URL.
_a : Optional[int] = {
    'shi-labs/dinat-mini-in1k-224': 'https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json',
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}
class __A ( BackboneConfigMixin , PretrainedConfig ):
    """Configuration class for the Dinat (Dilated Neighborhood Attention
    Transformer) model.

    NOTE(review): reconstructed — the original block's base classes were the
    undefined name ``SCREAMING_SNAKE_CASE_`` and every ``__init__``
    parameter was collapsed to ``a__`` (duplicate argument names, a
    SyntaxError). Bases, parameter names and attribute names are restored
    from the upstream ``DinatConfig``; confirm against transformers.
    """

    model_type = "dinat"
    # HF-standard attribute names mapped onto this config's own names.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 44 | """simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def gen_gaussian_kernel(k_size, sigma):
    """Return a ``k_size x k_size`` 2-D Gaussian kernel with the given sigma.

    Bug fix: the obfuscated version named the function ``SCREAMING_SNAKE_CASE``
    (shadowed by the next def and never matching the call site below) and gave
    both parameters the same name, which is a SyntaxError.
    """
    # Distance of the kernel centre from the top-left corner.
    center = k_size // 2
    # Coordinate grids centred on the kernel middle.
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def gaussian_filter(image, k_size, sigma):
    """Apply a Gaussian blur to a 2-D gray-scale ``image`` (valid convolution).

    Uses im2col: each ``k_size x k_size`` window becomes one row of a matrix,
    so a single matmul applies the kernel to every window.

    Bug fixes vs the obfuscated original: restored the function/parameter
    names used by the call sites, actually write each window into the im2col
    matrix, and cast with the string dtype ``"uint8"`` (the module-level
    ``from numpy import ... uinta ...`` is a typo for ``uint8`` and raises
    ImportError — that import line should be fixed as well).
    """
    height, width = image.shape[0], image.shape[1]
    # dst image height and width (valid convolution shrinks by k_size - 1)
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and stack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape (k*k,)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)
    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype("uint8")
    return dst
if __name__ == "__main__":
    # read original image
    # Bug fix: results were bound to throwaway names (`_a`) while the later
    # statements referenced the undefined `img`, `gray` and `gaussianaxa`.
    img = imread(r'../image_data/lena.jpg')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow('gaussian filter with 3x3 mask', gaussian3x3)
    imshow('gaussian filter with 5x5 mask', gaussian5x5)
    waitKey()
| 44 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import table: maps submodule name -> public names it exports.
# Bug fix: the dict was bound to a throwaway name while `_LazyModule` below
# reads `_import_structure` (NameError); the torch branch also clobbered the
# name with a bare list instead of registering the modeling module.
_import_structure = {
    'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mega'] = [
        'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MegaForCausalLM',
        'MegaForMaskedLM',
        'MegaForMultipleChoice',
        'MegaForQuestionAnswering',
        'MegaForSequenceClassification',
        'MegaForTokenClassification',
        'MegaModel',
        'MegaPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # Bug fix: the lazy module was built but never installed; the canonical
    # pattern replaces this module object in sys.modules.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCamelCase__ ( A__ : Tuple , A__ : Optional[int]=0.999 , A__ : Any="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(A__ : Any ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(A__ : Optional[int] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'Unsupported alpha_tranform_type: {alpha_transform_type}' )
__lowerCamelCase = []
for i in range(A__ ):
__lowerCamelCase = i / num_diffusion_timesteps
__lowerCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(A__ ) / alpha_bar_fn(A__ ) , A__ ) )
return torch.tensor(A__ , dtype=torch.floataa )
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
    """KDPM2-style (DPM-Solver-2) discrete scheduler with interpolated sigmas.

    NOTE(review): this block is heavily machine-mangled. Several defs repeat
    the parameter name ``UpperCamelCase_`` (duplicate parameter names are a
    SyntaxError), method bodies bind results to the throwaway ``__lowerCamelCase``
    while later lines read the real upstream names (``self.betas``,
    ``step_index``, ``sigma_hat`` ...), and ``torch.floataa`` is a mangled
    ``torch.float32``. Restore the upstream diffusers ``KDPM2DiscreteScheduler``
    identifiers before shipping; code is kept byte-identical here.
    """

    # Names of the Karras-style schedulers this scheduler is compatible with.
    UpperCAmelCase__ : List[str] = [e.name for e in KarrasDiffusionSchedulers]
    # Solver order: two model evaluations per output step.
    UpperCAmelCase__ : Any = 2

    @register_to_config
    def __init__( self: List[str] , UpperCamelCase_: int = 10_00 , UpperCamelCase_: float = 0.0_0085 , UpperCamelCase_: float = 0.012 , UpperCamelCase_: str = "linear" , UpperCamelCase_: Optional[Union[np.ndarray, List[float]]] = None , UpperCamelCase_: str = "epsilon" , UpperCamelCase_: str = "linspace" , UpperCamelCase_: int = 0 , ):
        # NOTE(review): duplicate parameter names — the upstream signature is
        # (num_train_timesteps, beta_start, beta_end, beta_schedule,
        #  trained_betas, prediction_type, timestep_spacing, steps_offset).
        if trained_betas is not None:
            __lowerCamelCase = torch.tensor(UpperCamelCase_ , dtype=torch.floataa )
        elif beta_schedule == "linear":
            __lowerCamelCase = torch.linspace(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , dtype=torch.floataa )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            __lowerCamelCase = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCamelCase_ , dtype=torch.floataa ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            __lowerCamelCase = betas_for_alpha_bar(UpperCamelCase_ )
        else:
            raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' )
        # Cumulative product of alphas defines the noise level per timestep.
        __lowerCamelCase = 1.0 - self.betas
        __lowerCamelCase = torch.cumprod(self.alphas , dim=0 )
        # set all values
        self.set_timesteps(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )

    def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[Any]=None ):
        # Map a timestep value to its index in the current sigma schedule.
        # NOTE(review): duplicate parameter names — upstream is
        # (timestep, schedule_timesteps=None).
        if schedule_timesteps is None:
            __lowerCamelCase = self.timesteps
        __lowerCamelCase = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            __lowerCamelCase = 1 if len(UpperCamelCase_ ) > 1 else 0
        else:
            __lowerCamelCase = timestep.cpu().item() if torch.is_tensor(UpperCamelCase_ ) else timestep
            __lowerCamelCase = self._index_counter[timestep_int]
        return indices[pos].item()

    @property
    def lowerCAmelCase__ ( self: Optional[int] ):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: Union[float, torch.FloatTensor] , ):
        # Scale the model input by 1 / sqrt(sigma^2 + 1) for the current step.
        __lowerCamelCase = self.index_for_timestep(UpperCamelCase_ )
        if self.state_in_first_order:
            __lowerCamelCase = self.sigmas[step_index]
        else:
            __lowerCamelCase = self.sigmas_interpol[step_index]
        __lowerCamelCase = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Union[str, torch.device] = None , UpperCamelCase_: Optional[int] = None , ):
        # Build the discrete timestep / sigma schedule for inference.
        # NOTE(review): duplicate parameter names — upstream is
        # (num_inference_steps, device=None, num_train_timesteps=None).
        __lowerCamelCase = num_inference_steps
        __lowerCamelCase = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            __lowerCamelCase = np.linspace(0 , num_train_timesteps - 1 , UpperCamelCase_ , dtype=UpperCamelCase_ )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            __lowerCamelCase = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            __lowerCamelCase = (np.arange(0 , UpperCamelCase_ ) * step_ratio).round()[::-1].copy().astype(UpperCamelCase_ )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            __lowerCamelCase = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            __lowerCamelCase = (np.arange(UpperCamelCase_ , 0 , -step_ratio )).round().copy().astype(UpperCamelCase_ )
            timesteps -= 1
        else:
            raise ValueError(
                F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
        # Convert cumulative alphas to Karras sigmas and interpolate onto the
        # inference grid.
        __lowerCamelCase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        __lowerCamelCase = torch.from_numpy(np.log(UpperCamelCase_ ) ).to(UpperCamelCase_ )
        __lowerCamelCase = np.interp(UpperCamelCase_ , np.arange(0 , len(UpperCamelCase_ ) ) , UpperCamelCase_ )
        __lowerCamelCase = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
        __lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ )
        # interpolate sigmas
        __lowerCamelCase = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
        __lowerCamelCase = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
        __lowerCamelCase = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
        if str(UpperCamelCase_ ).startswith("""mps""" ):
            # mps does not support float64
            __lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ , dtype=torch.floataa )
        else:
            __lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ )
        # interpolate timesteps
        __lowerCamelCase = self.sigma_to_t(UpperCamelCase_ ).to(UpperCamelCase_ , dtype=timesteps.dtype )
        __lowerCamelCase = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
        __lowerCamelCase = torch.cat([timesteps[:1], interleaved_timesteps] )
        # Clearing `sample` puts the solver back into its first-order state.
        __lowerCamelCase = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        __lowerCamelCase = defaultdict(UpperCamelCase_ )

    def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
        # Invert the sigma schedule: map a sigma back to a (fractional) timestep.
        # get log sigma
        __lowerCamelCase = sigma.log()
        # get distribution
        __lowerCamelCase = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        __lowerCamelCase = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
        __lowerCamelCase = low_idx + 1
        __lowerCamelCase = self.log_sigmas[low_idx]
        __lowerCamelCase = self.log_sigmas[high_idx]
        # interpolate sigmas
        __lowerCamelCase = (low - log_sigma) / (low - high)
        __lowerCamelCase = w.clamp(0 , 1 )
        # transform interpolation to time range
        __lowerCamelCase = (1 - w) * low_idx + w * high_idx
        __lowerCamelCase = t.view(sigma.shape )
        return t

    @property
    def lowerCAmelCase__ ( self: Dict ):
        # True while no first-order sample has been cached (i.e. next `step`
        # call performs the first of the two solver stages).
        return self.sample is None

    def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase_: Union[float, torch.FloatTensor] , UpperCamelCase_: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase_: bool = True , ):
        # One solver step (first- or second-order depending on internal state).
        # NOTE(review): duplicate parameter names — upstream is
        # (model_output, timestep, sample, return_dict=True).
        __lowerCamelCase = self.index_for_timestep(UpperCamelCase_ )
        # advance index counter by 1
        __lowerCamelCase = timestep.cpu().item() if torch.is_tensor(UpperCamelCase_ ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            __lowerCamelCase = self.sigmas[step_index]
            __lowerCamelCase = self.sigmas_interpol[step_index + 1]
            __lowerCamelCase = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            __lowerCamelCase = self.sigmas[step_index - 1]
            __lowerCamelCase = self.sigmas_interpol[step_index]
            __lowerCamelCase = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        __lowerCamelCase = 0
        __lowerCamelCase = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            __lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_interpol
            __lowerCamelCase = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            __lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_interpol
            __lowerCamelCase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("""prediction_type not implemented yet: sample""" )
        else:
            raise ValueError(
                F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            __lowerCamelCase = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            __lowerCamelCase = sigma_interpol - sigma_hat
            # store for 2nd order step
            __lowerCamelCase = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            __lowerCamelCase = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            __lowerCamelCase = sigma_next - sigma_hat
            __lowerCamelCase = self.sample
            __lowerCamelCase = None
        __lowerCamelCase = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=UpperCamelCase_ )

    def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: torch.FloatTensor , ):
        # Forward-diffuse clean samples to the noise level of the given timesteps.
        # NOTE(review): duplicate parameter names — upstream is
        # (original_samples, noise, timesteps).
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        __lowerCamelCase = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(UpperCamelCase_ ):
            # mps does not support float64
            __lowerCamelCase = self.timesteps.to(original_samples.device , dtype=torch.floataa )
            __lowerCamelCase = timesteps.to(original_samples.device , dtype=torch.floataa )
        else:
            __lowerCamelCase = self.timesteps.to(original_samples.device )
            __lowerCamelCase = timesteps.to(original_samples.device )
        __lowerCamelCase = [self.index_for_timestep(UpperCamelCase_ , UpperCamelCase_ ) for t in timesteps]
        __lowerCamelCase = sigmas[step_indices].flatten()
        # Broadcast sigma over trailing sample dimensions.
        while len(sigma.shape ) < len(original_samples.shape ):
            __lowerCamelCase = sigma.unsqueeze(-1 )
        __lowerCamelCase = original_samples + noise * sigma
        return noisy_samples

    def __len__( self: Tuple ):
        # Scheduler "length" is the number of training timesteps.
        return self.config.num_train_timesteps
| 29 | 0 |
'''simple docstring'''
def remove_duplicates(key: str) -> str:
    """Return *key* with repeated alphabetic characters removed (spaces kept).

    Bug fix: the obfuscated version named the function ``lowerCAmelCase_``
    (shadowed by its siblings; ``create_cipher_map`` calls ``remove_duplicates``)
    and its parameter ``_lowerCamelCase`` while the body read ``key``.
    """
    key_no_dups = ""
    for ch in key:
        # spaces always pass through; letters only on first occurrence
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups
def create_cipher_map(key: str) -> dict[str, str]:
    """Build a keyword-cipher substitution map from plaintext letter -> cipher letter.

    The deduplicated keyword fills the first positions; remaining alphabet
    letters are shifted back by the keyword length, skipping letters already
    used by the keyword.

    Bug fix: the obfuscated version bound ``len(key)`` to a throwaway local,
    leaving ``offset`` undefined, and carried the wrong function name.
    """
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encipher *message* with *cipher_map*; unmapped characters pass through.

    Bug fix: restored the function name called by ``main`` and distinct
    parameter names (the obfuscated def repeated ``_lowerCamelCase``, a
    SyntaxError).
    """
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())
def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Decipher *message* by inverting *cipher_map*; unmapped characters pass through.

    Bug fix: restored the function name called by ``main`` and distinct
    parameter names (the obfuscated def repeated ``_lowerCamelCase``).
    """
    # Invert the map once, then translate character by character.
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
def main() -> None:
    """Interactive driver: prompt for message/keyword and print the result.

    Bug fix: restored the name ``main`` (the ``__main__`` guard calls it) and
    real local names; behaviour and prompt strings unchanged.
    """
    message = input("""Enter message to encode or decode: """ ).strip()
    key = input("""Enter keyword: """ ).strip()
    option = input("""Encipher or decipher? E/D:""" ).strip()[0].lower()
    try:
        func = {"""e""": encipher, """d""": decipher}[option]
    except KeyError:
        raise KeyError("""invalid input option""" )
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import table: maps submodule name -> public names it exports.
# Bug fix: the dict was bound to a throwaway name while `_LazyModule` below
# reads `_import_structure` (NameError); the torch branch also clobbered the
# name with a bare list instead of registering the modeling module.
_import_structure = {
    '''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_pegasus_x'''] = [
        '''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''PegasusXForConditionalGeneration''',
        '''PegasusXModel''',
        '''PegasusXPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )
else:
    import sys

    # Bug fix: the lazy module was built but never installed; the canonical
    # pattern replaces this module object in sys.modules.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read a file and return its contents as a string of '0'/'1' bits.

    Exits the process with a message if the file cannot be opened.

    Bug fix: restored the function/parameter names used by the pipeline below
    (all helpers were defined under the duplicate name ``_A``). The quadratic
    ``result += curr_byte`` loop is replaced by a single ``str.join``.
    """
    try:
        with open(file_path, 'rb') as binary_file:
            data = binary_file.read()
        # Each byte becomes an 8-character, zero-padded bit string.
        return "".join(f'{dat:08b}' for dat in data)
    except OSError:
        print('File not accessible')
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """Decompress an LZW bit string back into the original bit string.

    Bug fixes vs the obfuscated original: ``index`` was never initialised,
    the lexicon updates and key-prefixing were assigned to throwaway locals
    (so the dictionary never grew), ``curr_string`` was never reset, and
    ``math.loga`` is a mangled ``math.log2``.
    """
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        # Extend the matched code with a trailing 0.
        lexicon[curr_string] = last_match_id + '0'
        if math.log2(index).is_integer():
            # Code length grows by one bit: prefix every existing key with 0.
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex['0' + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        # Register the next code with a trailing 1.
        lexicon[bin(index)[2:]] = last_match_id + '1'
        index += 1
        curr_string = ''
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    """Pack a '0'/'1' bit string into bytes and write them to *file_path*.

    The last chunk is padded with a '1' followed by zeros (a sentinel the
    reader can strip); as in the original, the final padding-only byte is
    not written (``result_byte_array[:-1]``). Exits on I/O failure.

    Bug fix: restored the function/parameter names used by the pipeline below
    (the original duplicate name ``_A`` shadowed every sibling helper).
    """
    byte_length = 8
    try:
        with open(file_path, 'wb') as opened_file:
            # Split the bit string into 8-bit chunks.
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('10000000')
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder='big'))
    except OSError:
        print('File not accessible')
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """Strip the size prefix (leading zeros up to the first '1') from *data_bits*.

    Bug fix: the obfuscated version assigned both slices to a throwaway local
    and returned the input unchanged; the second slice must operate on the
    already-trimmed string.
    """
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    # Drop the zero run, then drop the same count plus the marker bit from
    # the trimmed string (matches the companion compressor's prefix layout).
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    """Read *source_path*, LZW-decompress its bit stream, write *destination_path*.

    NOTE(review): despite the name (kept because the ``__main__`` guard calls
    ``compress``), this pipeline performs decompression — it strips the size
    prefix and runs ``decompress_data``.
    """
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
# Module-level logger shared by this feature extractor.
a_ : Tuple = logging.get_logger(__name__)
class a ( _SCREAMING_SNAKE_CASE ):
    """Whisper-style log-mel spectrogram feature extractor.

    NOTE(review): this block is machine-mangled. ``__init__`` repeats the
    parameter name ``__magic_name__`` (duplicate parameter names are a
    SyntaxError), configuration values are bound to the throwaway local ``_a``
    while later code reads ``self.n_fft`` / ``self.n_samples`` / ... , and
    ``np.floataa`` / ``np.intaa`` are mangled ``np.float32`` / ``np.int32``.
    Restore the upstream WhisperFeatureExtractor identifiers before shipping;
    code is kept byte-identical here.
    """

    # Name of the serialized model input produced by __call__.
    _lowerCAmelCase = ["""input_features"""]

    def __init__( self , __magic_name__=80 , __magic_name__=1_60_00 , __magic_name__=1_60 , __magic_name__=30 , __magic_name__=4_00 , __magic_name__=0.0 , __magic_name__=False , **__magic_name__ , ) -> Optional[int]:
        # NOTE(review): duplicate parameter names — upstream signature is
        # (feature_size=80, sampling_rate=16000, hop_length=160,
        #  chunk_length=30, n_fft=400, padding_value=0.0,
        #  return_attention_mask=False, **kwargs).
        super().__init__(
            feature_size=__magic_name__ , sampling_rate=__magic_name__ , padding_value=__magic_name__ , return_attention_mask=__magic_name__ , **__magic_name__ , )
        _a = n_fft
        _a = hop_length
        _a = chunk_length
        # Samples per chunk and resulting number of frames.
        _a = chunk_length * sampling_rate
        _a = self.n_samples // hop_length
        _a = sampling_rate
        # Mel filter bank used to project the STFT onto mel bins.
        _a = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__magic_name__ , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=__magic_name__ , norm='slaney' , mel_scale='slaney' , )

    def __UpperCAmelCase ( self , __magic_name__ ) -> np.ndarray:
        # Compute a log10 mel spectrogram, clamp its dynamic range to 8 dB
        # below the max, and rescale to roughly [-1, 1] (Whisper convention).
        _a = spectrogram(
            __magic_name__ , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , )
        # Drop the final frame to match the expected frame count.
        _a = log_spec[:, :-1]
        _a = np.maximum(__magic_name__ , log_spec.max() - 8.0 )
        _a = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def __UpperCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ = 0.0 ) -> List[np.ndarray]:
        # Per-utterance zero-mean/unit-variance normalization; when an
        # attention mask is given only the unpadded prefix is normalized and
        # the tail is filled with padding_value.
        if attention_mask is not None:
            _a = np.array(__magic_name__ , np.intaa )
            _a = []
            for vector, length in zip(__magic_name__ , attention_mask.sum(-1 ) ):
                _a = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
                if length < normed_slice.shape[0]:
                    _a = padding_value
                normed_input_values.append(__magic_name__ )
        else:
            _a = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
        return normed_input_values

    def __call__( self , __magic_name__ , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "max_length" , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , **__magic_name__ , ) -> BatchFeature:
        # Featurize raw speech: validate sampling rate, batch/convert to
        # float32 arrays, pad to n_samples, optionally normalize, then
        # extract log-mel features per waveform.
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
                    f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
                    f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        _a = isinstance(__magic_name__ , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
        _a = is_batched_numpy or (
            isinstance(__magic_name__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            _a = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(__magic_name__ , np.ndarray ):
            _a = np.asarray(__magic_name__ , dtype=np.floataa )
        elif isinstance(__magic_name__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            _a = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            _a = [np.asarray([raw_speech] ).T]
        _a = BatchFeature({'input_features': raw_speech} )
        # convert into correct format for padding
        _a = self.pad(
            __magic_name__ , padding=__magic_name__ , max_length=max_length if max_length else self.n_samples , truncation=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_attention_mask=return_attention_mask or do_normalize , )
        # zero-mean and unit-variance normalization
        if do_normalize:
            _a = self.zero_mean_unit_var_norm(
                padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , )
            _a = np.stack(padded_inputs['input_features'] , axis=0 )
        # make sure list is in array format
        _a = padded_inputs.get('input_features' ).transpose(2 , 0 , 1 )
        _a = [self._np_extract_fbank_features(__magic_name__ ) for waveform in input_features[0]]
        if isinstance(input_features[0] , __magic_name__ ):
            _a = [np.asarray(__magic_name__ , dtype=np.floataa ) for feature in input_features]
        else:
            _a = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            _a = padded_inputs['attention_mask'][:, :: self.hop_length]
        if return_tensors is not None:
            _a = padded_inputs.convert_to_tensors(__magic_name__ )
        return padded_inputs

    def __UpperCAmelCase ( self ) -> Dict[str, Any]:
        # Serialize the config, dropping the (large, derivable) mel filter bank.
        _a = copy.deepcopy(self.__dict__ )
        _a = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# Slow-tokenizer import guarded on sentencepiece availability.
if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    # NOTE(review): the sentinel should be bound to the name `NllbTokenizer`
    # (the class below references `NllbTokenizer` as its slow class); binding
    # it to `lowercase__` leaves that name undefined when sentencepiece is
    # absent.
    lowercase__ : str = None

lowercase__ : Dict = logging.get_logger(__name__)

# NOTE(review): every module constant below is bound to the same obfuscated
# name `lowercase__`, so each assignment overwrites the previous one, and the
# class body's references (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES) are never defined. Restore the
# original constant names.
lowercase__ : Tuple = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}

# Checkpoint name -> hosted tokenizer resource URLs.
lowercase__ : Union[str, Any] = {
    '''vocab_file''': {
        '''facebook/nllb-200-distilled-600M''': (
            '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
        ),
    },
    '''tokenizer_file''': {
        '''facebook/nllb-200-distilled-600M''': (
            '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
        ),
    },
}

# Checkpoint name -> maximum model input length (positional embedding size).
lowercase__ : Union[str, Any] = {
    '''facebook/nllb-large-en-ro''': 10_24,
    '''facebook/nllb-200-distilled-600M''': 10_24,
}
# fmt: off
lowercase__ : Dict = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', 
'''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : List[Any] = VOCAB_FILES_NAMES
_snake_case : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Any = ['input_ids', 'attention_mask']
_snake_case : Dict = NllbTokenizer
_snake_case : List[int] = []
_snake_case : List[int] = []
def __init__( self : List[str] , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : List[Any]="<s>" , lowerCAmelCase__ : Any="</s>" , lowerCAmelCase__ : int="</s>" , lowerCAmelCase__ : Dict="<s>" , lowerCAmelCase__ : Tuple="<unk>" , lowerCAmelCase__ : Union[str, Any]="<pad>" , lowerCAmelCase__ : Any="<mask>" , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : Optional[Any]=False , **lowerCAmelCase__ : List[str] , ) -> Any:
'''simple docstring'''
_UpperCamelCase = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
_UpperCamelCase = legacy_behaviour
super().__init__(
vocab_file=_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , src_lang=_SCREAMING_SNAKE_CASE , tgt_lang=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , legacy_behaviour=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
_UpperCamelCase = vocab_file
_UpperCamelCase = False if not self.vocab_file else True
_UpperCamelCase = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
_UpperCamelCase = {
lang_code: self.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_UpperCamelCase = src_lang if src_lang is not None else '''eng_Latn'''
_UpperCamelCase = self.convert_tokens_to_ids(self._src_lang )
_UpperCamelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def snake_case__ ( self : str ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : Any ) -> None:
'''simple docstring'''
_UpperCamelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
    """Add the model's prefix/suffix special tokens around one (or two) sequences.

    Returns ``prefix + token_ids_0 [+ token_ids_1] + suffix``.
    """
    if token_ids_1 is None:
        return self.prefix_tokens + token_ids_0 + self.suffix_tokens
    # We don't expect to process pairs, but leave the pair logic for API consistency
    return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
    """Return an all-zero token-type-id list (this model does not use token types)."""
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]

    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep) * [0]
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
    """Used by translation pipelines: encode *raw_inputs* and attach the
    target-language id as ``forced_bos_token_id``."""
    if src_lang is None or tgt_lang is None:
        raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''')
    self.src_lang = src_lang
    inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
    tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
    inputs["forced_bos_token_id"] = tgt_lang_id
    return inputs

def prepare_seq2seq_batch(self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs):
    """Set source/target languages, then defer to the parent implementation.

    The original called ``super().prepare_seqaseq_batch`` — a character-mangled
    ``prepare_seq2seq_batch`` that does not exist on the base class.
    """
    self.src_lang = src_lang
    self.tgt_lang = tgt_lang
    return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
def _switch_to_input_mode(self):
    """Configure special tokens for encoding source-language text."""
    return self.set_src_lang_special_tokens(self.src_lang)

def _switch_to_target_mode(self):
    """Configure special tokens for encoding target-language text."""
    return self.set_tgt_lang_special_tokens(self.tgt_lang)
def set_src_lang_special_tokens(self, src_lang) -> None:
    """Reset special tokens to the source-language setting.

    Legacy behaviour: no prefix, suffix = [eos, src_lang_code].
    Default behaviour: prefix = [src_lang_code], suffix = [eos].
    """
    self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

    if self.legacy_behaviour:
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
    else:
        self.prefix_tokens = [self.cur_lang_code]
        self.suffix_tokens = [self.eos_token_id]

    prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
    suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

    # Rebuild the fast-tokenizer post-processor so encode() applies the template.
    self._tokenizer.post_processor = processors.TemplateProcessing(
        single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str,
        pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str,
        special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
    )

def set_tgt_lang_special_tokens(self, tgt_lang) -> None:
    """Reset special tokens to the target-language setting (mirror of the
    source-language variant above)."""
    self.cur_lang_code = self.convert_tokens_to_ids(tgt_lang)

    if self.legacy_behaviour:
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
    else:
        self.prefix_tokens = [self.cur_lang_code]
        self.suffix_tokens = [self.eos_token_id]

    prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
    suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

    self._tokenizer.post_processor = processors.TemplateProcessing(
        single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str,
        pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str,
        special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
    )
def save_vocabulary(self, save_directory, filename_prefix=None):
    """Copy the sentencepiece vocab file into *save_directory*.

    Returns a 1-tuple with the written path, or ``None`` (after logging an
    error) when *save_directory* is not a directory.
    Raises ``ValueError`` when the fast tokenizer has no vocab file to copy.
    """
    if not self.can_save_slow_tokenizer:
        raise ValueError(
            '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
            '''tokenizer.'''
        )

    if not os.path.isdir(save_directory):
        logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""")
        return
    out_vocab_file = os.path.join(
        save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
    )

    # Only copy when the destination differs from the source file.
    if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
        copyfile(self.vocab_file, out_vocab_file)

    return (out_vocab_file,)
| 324 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
# Packages whose installed version is verified on import; the loop below
# reads this list, but the original bound it to a mangled placeholder name.
pkgs_to_check_at_runtime = [
    '''python''',
    '''tqdm''',
    '''regex''',
    '''requests''',
    '''packaging''',
    '''filelock''',
    '''numpy''',
    '''tokenizers''',
    '''huggingface-hub''',
    '''safetensors''',
    '''accelerate''',
    '''pyyaml''',
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    """Raise if the installed version of *pkg* does not satisfy the pinned
    requirement in ``deps``; *hint* is appended to the error message."""
    require_version(deps[pkg], hint)
| 329 | 0 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    """Convert a TensorFlow Funnel checkpoint to a PyTorch state dict.

    The original had all four parameters mangled to the same name; they are
    reconstructed from the argparse arguments passed by ``__main__``.
    """
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # The original bound the parser and parsed args to a mangled placeholder
    # while reading `parser`/`args`, guaranteeing a NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
| 364 |
from __future__ import annotations
def generate_all_permutations(sequence: list) -> None:
    """Print every permutation of *sequence* via backtracking.

    Both functions were mangled to the same name (the second def silently
    replaced the first) and had duplicate parameter names.
    """
    create_state_space_tree(sequence, [], 0, [0 for _ in range(len(sequence))])


def create_state_space_tree(sequence: list, current_sequence: list, index: int, index_used: list) -> None:
    """Depth-first backtracking: extend *current_sequence* with each unused
    element, recurse, then undo the choice."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            # Undo the choice so the next branch starts from a clean state.
            current_sequence.pop()
            index_used[i] = False
# Demo: the original assigned both sequences to a mangled placeholder name
# while calling with `sequence` / `sequence_2`, which were undefined.
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2 = ["A", "B", "C"]
generate_all_permutations(sequence_2)
| 343 | 0 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
# Module fixtures: the original bound all three constants to the same mangled
# name `__a`; the real names are recovered from their uses in the test classes
# (EN_CODE / RO_CODE appear in the fairseq-parity assertions below).
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 2_5_0_0_0_4
RO_CODE = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( _UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
__a : Dict = MBartaaTokenizer
__a : Union[str, Any] = MBartaaTokenizerFast
__a : Any = True
__a : List[str] = True
def setUp(self):
    """Create a tiny sentencepiece-backed tokenizer and persist it for the mixin tests."""
    super().setUp()

    # We have a SentencePiece fixture for testing
    tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang='''en_XX''', tgt_lang='''ro_RO''', keep_accents=True)
    tokenizer.save_pretrained(self.tmpdirname)

def test_convert_token_and_id(self):
    """`_convert_token_to_id` and `_convert_id_to_token` round-trip for `<s>`."""
    token = '''<s>'''
    token_id = 0

    self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
    self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

def test_get_vocab(self):
    """First/last vocab entries and vocab length of the fixture tokenizer."""
    vocab_keys = list(self.get_tokenizer().get_vocab().keys())

    self.assertEqual(vocab_keys[0], '''<s>''')
    self.assertEqual(vocab_keys[1], '''<pad>''')
    self.assertEqual(vocab_keys[-1], '''<mask>''')
    self.assertEqual(len(vocab_keys), 10_54)

def test_vocab_size(self):
    self.assertEqual(self.get_tokenizer().vocab_size, 10_54)
def test_full_tokenizer(self):
    """Tokenize, convert to ids and back; unknown pieces must map to `<unk>`."""
    tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang='''en_XX''', tgt_lang='''ro_RO''', keep_accents=True)

    tokens = tokenizer.tokenize('''This is a test''')
    self.assertListEqual(tokens, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])

    self.assertListEqual(
        tokenizer.convert_tokens_to_ids(tokens),
        [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]],
    )

    tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
    self.assertListEqual(
        tokens,
        [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''],
    )
    ids = tokenizer.convert_tokens_to_ids(tokens)
    self.assertListEqual(
        ids,
        [
            value + tokenizer.fairseq_offset
            for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
        ],
    )

    # Converting back: out-of-vocab ids come back as `<unk>`.
    back_tokens = tokenizer.convert_ids_to_tokens(ids)
    self.assertListEqual(
        back_tokens,
        [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''],
    )
@slow
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def test_save_pretrained(self):
    """Fast and slow tokenizers must save the same files (modulo tokenizer.json)
    and reload with matching special tokens, in all three save formats."""
    if not self.test_slow_tokenizer:
        # as we don't have a slow version, we can't compare the outputs between slow and fast versions
        return

    self.tokenizers_list[0] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

            tmpdirname2 = tempfile.mkdtemp()

            tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
            tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

            # Checks it save with the same files + the tokenizer.json file for the fast one
            self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))
            tokenizer_r_files = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f)
            self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

            # Checks everything loads correctly in the same way
            tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
            tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

            # Check special tokens are set accordingly on Rust and Python
            for key in tokenizer_pp.special_tokens_map:
                self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

            shutil.rmtree(tmpdirname2)

            # Save tokenizer rust, legacy_format=True
            tmpdirname2 = tempfile.mkdtemp()

            tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
            tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

            # Checks it save with the same files
            self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

            # Checks everything loads correctly in the same way
            tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
            tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

            # Check special tokens are set accordingly on Rust and Python
            for key in tokenizer_pp.special_tokens_map:
                self.assertTrue(hasattr(tokenizer_rp, key))

            shutil.rmtree(tmpdirname2)

            # Save tokenizer rust, legacy_format=False
            tmpdirname2 = tempfile.mkdtemp()

            tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
            tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

            # Checks it saved the tokenizer.json file
            self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))

            # Checks everything loads correctly in the same way
            tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
            tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

            # Check special tokens are set accordingly on Rust and Python
            for key in tokenizer_pp.special_tokens_map:
                self.assertTrue(hasattr(tokenizer_rp, key))

            shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
__a : List[Any] = '''facebook/mbart-large-50-one-to-many-mmt'''
__a : Dict = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
__a : List[Any] = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
__a : List[Any] = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
@classmethod
def setUpClass(cls):
    """Download the reference checkpoint tokenizer once for the whole class."""
    cls.tokenizer = MBartaaTokenizer.from_pretrained(
        cls.checkpoint_name, src_lang='''en_XX''', tgt_lang='''ro_RO'''
    )
    cls.pad_token_id = 1
    return cls

def check_language_codes(self):
    """Language codes must sit at their fixed fairseq positions."""
    self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''], 25_00_01)
    self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''], 25_00_04)
    self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''], 25_00_20)
    self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''], 25_00_38)
def test_enro_tokenizer_batch_encode_plus(self):
    """Batch encoding of the source text must match the expected token ids."""
    ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
    self.assertListEqual(self.expected_src_tokens, ids)

def test_enro_tokenizer_decode_ignores_language_codes(self):
    """Decoding with skip_special_tokens must drop the language code and eos."""
    self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
    generated_ids = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
    result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
    expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
    self.assertEqual(result, expected_romanian)
    self.assertNotIn(self.tokenizer.eos_token, result)

def test_enro_tokenizer_truncation(self):
    """Truncated encodings keep the language code first and eos last."""
    src_text = ['''this is gunna be a long sentence ''' * 20]
    assert isinstance(src_text[0], str)
    desired_max_length = 10
    ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
    self.assertEqual(ids[0], EN_CODE)
    self.assertEqual(ids[-1], 2)
    self.assertEqual(len(ids), desired_max_length)
def test_mask_token(self):
    self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR''']), [25_00_53, 25_00_01])

def test_special_tokens_unaffacted_by_save_load(self):
    """Saving and reloading must preserve the fairseq special-token mapping."""
    tmpdirname = tempfile.mkdtemp()
    original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
    self.tokenizer.save_pretrained(tmpdirname)
    new_tok = MBartaaTokenizer.from_pretrained(tmpdirname)
    self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
def test_batch_fairseq_parity(self):
    """Batch layout must match fairseq: [lang_code, ..., eos] inputs and shifted decoder ids."""
    batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors='''pt''')
    batch["decoder_input_ids"] = shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id)

    # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
    assert batch.input_ids[1][0] == EN_CODE
    assert batch.input_ids[1][-1] == 2
    assert batch.labels[1][0] == RO_CODE
    assert batch.labels[1][-1] == 2
    assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

@require_torch
def test_tokenizer_prepare_batch(self):
    """Prepared batches are BatchEncoding with the expected shape and reset special tokens."""
    batch = self.tokenizer(
        self.src_text,
        text_target=self.tgt_text,
        padding=True,
        truncation=True,
        max_length=len(self.expected_src_tokens),
        return_tensors='''pt''',
    )
    batch["decoder_input_ids"] = shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id)

    self.assertIsInstance(batch, BatchEncoding)

    self.assertEqual((2, 14), batch.input_ids.shape)
    self.assertEqual((2, 14), batch.attention_mask.shape)
    result = batch.input_ids.tolist()[0]
    self.assertListEqual(self.expected_src_tokens, result)
    self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
    # Test that special tokens are reset
    self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
    self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
def test_seq2seq_max_target_length(self):
    """Source and target truncation lengths are applied independently."""
    batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors='''pt''')
    targets = self.tokenizer(
        text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors='''pt'''
    )
    labels = targets['''input_ids''']
    batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

    self.assertEqual(batch.input_ids.shape[1], 3)
    self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
# en_XX, A, test, EOS
'''input_ids''': [[25_00_04, 62, 30_34, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_00_01,
} , ) | 210 | from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Return pi to *precision* significant digits via the Chudnovsky algorithm.

    Raises TypeError for non-integer input and ValueError for precision < 1.
    The last computed digit is dropped because it may be rounded incorrectly.
    """
    if not isinstance(precision, int):
        raise TypeError('''Undefined for non-integers''')
    elif precision < 1:
        raise ValueError('''Undefined for non-natural numbers''')

    # The original never applied the precision (assignment target was mangled);
    # getcontext was imported precisely for this line.
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    # The original assigned 50 to a mangled placeholder while formatting `n`.
    n = 50
    print(F'''The first {n} digits of pi is: {pi(n)}''')
"""simple docstring"""
from __future__ import annotations
# Type aliases for the rotor configuration (all constants below were bound to
# the same mangled name in the original, so every later reference failed).
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
# Symmetric mapping: reflector[reflector[x]] == x for every letter.
reflector = {
    "A": "N",
    "N": "A",
    "B": "O",
    "O": "B",
    "C": "P",
    "P": "C",
    "D": "Q",
    "Q": "D",
    "E": "R",
    "R": "E",
    "F": "S",
    "S": "F",
    "G": "T",
    "T": "G",
    "H": "U",
    "U": "H",
    "I": "V",
    "V": "I",
    "J": "W",
    "W": "J",
    "K": "X",
    "X": "K",
    "L": "Y",
    "Y": "L",
    "M": "Z",
    "Z": "M",
}

# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(rotpos, rotsel, pb):
    """Validate rotor selection/positions and parse the plugboard string.

    Returns ``(rotpos, rotsel, plugboard_dict)``.  Raises Exception for
    duplicate rotors and ValueError for positions outside 1..26.
    """
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1}"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring):
    """Turn a plugboard string (letter pairs) into a symmetric swap dict.

    Raises TypeError for non-strings, Exception for odd-length strings,
    non-alphabet symbols, or duplicate symbols.  Empty string -> empty dict.
    """
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    # NOTE(review): this call discards its result, so spaces are NOT actually
    # removed — kept as-is to preserve behavior; confirm intent upstream.
    pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
def enigma(
    text,
    rotor_position,
    rotor_selection=(rotor1, rotor2, rotor3),
    plugb="",
):
    """Encipher/decipher *text* with an Enigma-style machine.

    The machine is symmetric: running the output through the same settings
    recovers the input.  Non-alphabet characters pass through unchanged.
    """
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    # Positions are given 1-based; work with 0-based offsets internally.
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    # NOTE(review): the original collapsed all rotor names into one placeholder,
    # so the intended selection is unrecoverable; any three distinct rotors work.
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 365 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( lowerCAmelCase):
_a = (DDIMParallelScheduler,)
_a = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def get_scheduler_config(self, **kwargs):
    """Default DDIMParallelScheduler config; any field can be overridden via kwargs."""
    config = {
        "num_train_timesteps": 10_00,
        "beta_start": 0.00_01,
        "beta_end": 0.02,
        "beta_schedule": "linear",
        "clip_sample": True,
    }

    config.update(**kwargs)
    return config
def full_loop(self, **config):
    """Run a full 10-step denoising loop with eta=0 and return the final sample."""
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config(**config)
    scheduler = scheduler_class(**scheduler_config)

    num_inference_steps, eta = 10, 0.0

    model = self.dummy_model()
    sample = self.dummy_sample_deter

    scheduler.set_timesteps(num_inference_steps)

    for t in scheduler.timesteps:
        residual = model(sample, t)
        sample = scheduler.step(residual, t, sample, eta).prev_sample

    return sample
def test_timesteps(self):
    for timesteps in [1_00, 5_00, 10_00]:
        self.check_over_configs(num_train_timesteps=timesteps)

def test_steps_offset(self):
    for steps_offset in [0, 1]:
        self.check_over_configs(steps_offset=steps_offset)

    # steps_offset=1 shifts every timestep up by one.
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config(steps_offset=1)
    scheduler = scheduler_class(**scheduler_config)
    scheduler.set_timesteps(5)
    assert torch.equal(scheduler.timesteps, torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1]))

def test_betas(self):
    for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1], [0.0_02, 0.02, 0.2, 2]):
        self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

def test_schedules(self):
    for schedule in ["linear", "squaredcos_cap_v2"]:
        self.check_over_configs(beta_schedule=schedule)

def test_prediction_type(self):
    for prediction_type in ["epsilon", "v_prediction"]:
        self.check_over_configs(prediction_type=prediction_type)

def test_clip_sample(self):
    for clip_sample in [True, False]:
        self.check_over_configs(clip_sample=clip_sample)

def test_timestep_spacing(self):
    for timestep_spacing in ["trailing", "leading"]:
        self.check_over_configs(timestep_spacing=timestep_spacing)

def test_rescale_betas_zero_snr(self):
    for rescale_betas_zero_snr in [True, False]:
        self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

def test_thresholding(self):
    self.check_over_configs(thresholding=False)
    for threshold in [0.5, 1.0, 2.0]:
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(
                thresholding=True,
                prediction_type=prediction_type,
                sample_max_value=threshold,
            )
def SCREAMING_SNAKE_CASE ( self: str ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: int ):
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ):
self.check_over_forward(time_step=_lowerCAmelCase , num_inference_steps=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: str ):
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=_lowerCAmelCase , eta=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: str ):
lowercase :Dict = self.scheduler_classes[0]
lowercase :Tuple = self.get_scheduler_config()
lowercase :Optional[Any] = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1e-5
def SCREAMING_SNAKE_CASE ( self: List[str] ):
lowercase :Union[str, Any] = self.scheduler_classes[0]
lowercase :Union[str, Any] = self.get_scheduler_config()
lowercase :Union[str, Any] = scheduler_class(**_lowerCAmelCase )
lowercase , lowercase :Union[str, Any] = 10, 0.0
scheduler.set_timesteps(_lowerCAmelCase )
lowercase :Dict = self.dummy_model()
lowercase :Dict = self.dummy_sample_deter
lowercase :Union[str, Any] = self.dummy_sample_deter + 0.1
lowercase :int = self.dummy_sample_deter - 0.1
lowercase :Dict = samplea.shape[0]
lowercase :Tuple = torch.stack([samplea, samplea, samplea] , dim=0 )
lowercase :Optional[Any] = torch.arange(_lowerCAmelCase )[0:3, None].repeat(1 , _lowerCAmelCase )
lowercase :Union[str, Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
lowercase :Optional[int] = scheduler.batch_step_no_noise(_lowerCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , _lowerCAmelCase )
lowercase :int = torch.sum(torch.abs(_lowerCAmelCase ) )
lowercase :Optional[int] = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self: List[Any] ):
lowercase :int = self.full_loop()
lowercase :Optional[int] = torch.sum(torch.abs(_lowerCAmelCase ) )
lowercase :Any = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self: Any ):
lowercase :Dict = self.full_loop(prediction_type="v_prediction" )
lowercase :int = torch.sum(torch.abs(_lowerCAmelCase ) )
lowercase :Union[str, Any] = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self: Optional[int] ):
# We specify different beta, so that the first alpha is 0.99
lowercase :List[Any] = self.full_loop(set_alpha_to_one=_lowerCAmelCase , beta_start=0.01 )
lowercase :List[Any] = torch.sum(torch.abs(_lowerCAmelCase ) )
lowercase :Union[str, Any] = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self: Any ):
# We specify different beta, so that the first alpha is 0.99
lowercase :Tuple = self.full_loop(set_alpha_to_one=_lowerCAmelCase , beta_start=0.01 )
lowercase :str = torch.sum(torch.abs(_lowerCAmelCase ) )
lowercase :List[str] = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
assert abs(result_mean.item() - 0.19_41 ) < 1e-3
| 158 | 0 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
# Non-default values for every "common" kwarg accepted by `PretrainedConfig`.
# The completeness test below (L10544/L10548 in this file) reads this mapping
# under the name `config_common_kwargs`, so that is the name it must have —
# the obfuscated original bound it to `_lowerCamelCase`, leaving the real
# name undefined (NameError).
config_common_kwargs = {
    "return_dict": False,
    "output_hidden_states": True,
    "output_attentions": True,
    "torchscript": True,
    "torch_dtype": "float16",
    "use_bfloat16": True,
    "tf_legacy_loss": True,
    "pruned_heads": {"a": 1},
    "tie_word_embeddings": False,
    "is_decoder": True,
    "cross_attention_hidden_size": 128,
    "add_cross_attention": True,
    "tie_encoder_decoder": True,
    "max_length": 50,
    "min_length": 3,
    "do_sample": True,
    "early_stopping": True,
    "num_beams": 3,
    "num_beam_groups": 3,
    "diversity_penalty": 0.5,
    "temperature": 2.0,
    "top_k": 10,
    "top_p": 0.7,
    "typical_p": 0.2,
    "repetition_penalty": 0.8,
    "length_penalty": 0.8,
    "no_repeat_ngram_size": 5,
    "encoder_no_repeat_ngram_size": 5,
    "bad_words_ids": [1, 2, 3],
    "num_return_sequences": 3,
    "chunk_size_feed_forward": 5,
    "output_scores": True,
    "return_dict_in_generate": True,
    "forced_bos_token_id": 2,
    "forced_eos_token_id": 3,
    "remove_invalid_values": True,
    "architectures": ["BertModel"],
    "finetuning_task": "translation",
    "id2label": {0: "label"},
    "label2id": {"label": "0"},
    "tokenizer_class": "BertTokenizerFast",
    "prefix": "prefix",
    "bos_token_id": 6,
    "pad_token_id": 7,
    "eos_token_id": 8,
    "sep_token_id": 9,
    "decoder_start_token_id": 10,
    "exponential_decay_length_penalty": (5, 1.01),
    "suppress_tokens": [0, 1],
    "begin_suppress_tokens": 2,
    "task_specific_params": {"translation": "some_params"},
    "problem_type": "regression",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    """Staging-endpoint tests for pushing configurations to the Hugging Face Hub.

    NOTE(review): the obfuscated original gave this class and the next one the
    same name (the second clobbered the first), gave every method the same
    name, and never assigned ``cls._token`` even though every test reads it.
    Names below are restored from the method bodies.
    """

    @classmethod
    def setUpClass(cls):
        # Authenticate once; the individual tests read cls._token.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup of every repo the tests may have created.
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    """Unit tests for `PretrainedConfig` utility behaviour.

    NOTE(review): renamed from the obfuscated original, which reused the
    previous class's name (clobbering it), duplicated every method name, and
    never bound the locals its bodies read.  Names restored from usage.
    """

    def test_config_from_string(self):
        c = GPTaConfig()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mocked 500 response so every network request looks like a server outage.
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # Loading a config straight from a resolved URL (legacy behaviour).
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 14 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list:
'''simple docstring'''
lowercase_ = len(__lowerCAmelCase )
lowercase_ = [[0] * n for i in range(__lowerCAmelCase )]
for i in range(__lowerCAmelCase ):
lowercase_ = y_points[i]
for i in range(2 , __lowerCAmelCase ):
for j in range(__lowerCAmelCase , __lowerCAmelCase ):
lowercase_ = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 136 | 0 |
def __lowercase ( a__ ) -> str:
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350 |
import os
def solution(a__: str = "input.txt") -> int:
    """Project Euler 82: minimal path sum moving right, up or down in a grid.

    Reads a comma-separated matrix from ``a__`` (joined against that path's
    own directory) and returns the smallest sum along any path from the left
    column to the right column.

    NOTE(review): the obfuscated original bound every local to
    ``__SCREAMING_SNAKE_CASE`` while the body referenced ``matrix``/``rows``/
    ``cols``/``minimal_path_sums``, and the ``__main__`` guard below calls
    ``solution()``; names are restored accordingly.
    """
    with open(os.path.join(os.path.dirname(a__), a__)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    # minimal_path_sums[i][j] = cheapest cost of reaching cell (i, j) from column 0.
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # Step right from the previous column...
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # ...then relax moving down the column...
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        # ...and moving up the column.
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
| 118 | 0 |
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True when the two strings are anagrams, ignoring case and spaces.

    NOTE(review): the obfuscated original declared two identical parameter
    names (a SyntaxError), passed a parameter to ``defaultdict`` instead of
    ``int``, and never bound the locals the body read; the ``__main__`` guard
    below calls ``check_anagrams``, which fixes the function name.
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 178 |
# First code cell injected into generated documentation notebooks.
# The list below references INSTALL_CONTENT, so that is the name the string
# must have — the obfuscated original bound it to `lowercase` (NameError).
INSTALL_CONTENT = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
# NOTE(review): names `notebook_first_cells`/`black_avoid_patterns` follow the
# doc-build convention used elsewhere in transformers — TODO confirm consumers.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 178 | 1 |
'''simple docstring'''
def reverse_long_words(sentence: str) -> str:
    """Reverse every word longer than 4 characters, keeping word order.

    NOTE(review): in the obfuscated original the parameter name, the
    ``sentence`` reference and the ``len(word)`` test were all mangled to the
    same placeholder; names restored from the generator-expression body and
    from the ``__main__`` guard, which calls ``reverse_long_words``.
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
| 101 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the blenderbot model package.  The _LazyModule
# call at the bottom reads `_import_structure`, so that is the name the dict
# and all conditional additions must use — the obfuscated original rebound a
# single `__lowerCamelCase` variable instead, losing every entry.
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 101 | 1 |
'''simple docstring'''
import heapq
import sys
import numpy as np
# 2-D grid position.
TPos = tuple[int, int]


class PriorityQueue:
    """Min-heap keyed by priority with O(1) membership via a companion set.

    NOTE(review): the obfuscated original named the class ``_lowercase``,
    every method ``lowerCamelCase_`` and every local ``lowerCamelCase__``;
    names are restored from the call sites in ``multi_a_star`` (``put``,
    ``minkey``, ``remove_element``, ``top_show``).  ``get`` follows the same
    naming convention — TODO confirm no external caller relies on another name.
    """

    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        """Smallest queued priority, or +inf when empty."""
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        """Insert ``item``, or re-prioritise it if it is already queued."""
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update: pop entries until `item` surfaces, then push everything
            # back, with `item` carrying the new priority.
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for (pro, xxx) in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        """Delete ``item`` from the queue if present."""
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for (prito, yyy) in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        """Item with the smallest priority, without removing it."""
        return self.elements[0][1]

    def get(self):
        """Pop and return the (priority, item) pair with the smallest priority."""
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(P, goal):
    """Euclidean distance between grid cells ``P`` and ``goal``.

    Name restored from its callers (``heuristic_2`` and the ``heuristics``
    table below); the obfuscated original also declared two identical
    parameter names, which is a SyntaxError.
    """
    a = np.array(P)
    b = np.array(goal)
    return np.linalg.norm(a - b)
def heuristic_2(P, goal):
    """Inadmissible heuristic: euclidean distance floor-divided by the global
    time counter ``t`` (so it decays as the search progresses)."""
    return consistent_heuristic(P, goal) // t
def heuristic_1(p, goal):
    """Manhattan distance between grid cells ``p`` and ``goal``."""
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])
def key(start, i, goal, g_function):
    """f-value of ``start`` for search ``i``: g + W1 * h_i(start, goal).

    Reads module-level ``W1`` and the ``heuristics`` index table.
    """
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    """Render the grid with the discovered path, print the path, and exit.

    NOTE(review): reads module-level ``n`` and ``blocks``; terminates the
    whole program via ``sys.exit()`` after printing.  Grid assignment targets
    are reconstructed from the surrounding structure — the obfuscated
    original dropped them entirely.
    """
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p):
    """True iff ``p`` lies inside the n x n grid (reads module-level ``n``)."""
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    """Relax the four neighbours of cell ``s`` and requeue them.

    Removes ``s`` from every open list first, then for each in-bounds,
    unblocked neighbour updates g/back-pointer and re-inserts it into the
    anchor queue and (when its inflated key qualifies) queue ``j``.
    Reads module-level ``n_heuristic``, ``blocks``, ``goal`` and ``W2``.
    """
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )
def make_common_ground():
    """Build the list of obstacle cells used by the denser demo grid."""
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))

    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
# NOTE(review): every constant below was bound to `_A` in the obfuscated
# original (each assignment clobbering the previous) while the functions
# above reference `heuristics`, `blocks`, `W1`, `W2`, `n`, `n_heuristic`,
# `goal` and `t`; names are restored from those references.
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

# Vertical wall of obstacles along y == 1.
blocks_blk = [
    (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1),
    (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1  # time counter consumed by heuristic_2
def multi_a_star(start, goal, n_heuristic):
    """Multi-heuristic A*: one consistent anchor search plus inadmissible ones.

    Prints the grid/path via ``do_something`` (which exits the process) when
    a path is found, otherwise prints the grid with "No path found to goal".
    Reads module-level ``W2``, ``blocks`` and ``n``; increments global ``t``.
    """
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    # NOTE(review): top_show() returns a single (x, y) cell, yet the
                    # original unpacked it into two targets here (and not in the
                    # anchor branch below) — shape preserved, TODO confirm upstream.
                    _, get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        0,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
| 41 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
# NOTE(review): every binding in this script was obfuscated to `_A` while the
# later statements referenced `vocab`, `merges`, `build_dir`, `tokenizer`,
# `config`, `tiny_model`, `batch`, `outputs` and `mname_tiny`; names are
# restored from those references.
mname_tiny = "tiny-wmt19-en-ru"

# Build
# borrowed from a test
vocab = [
    "l",
    "o",
    "w",
    "e",
    "r",
    "s",
    "t",
    "i",
    "d",
    "n",
    "w</w>",
    "r</w>",
    "t</w>",
    "lo",
    "low",
    "er</w>",
    "low</w>",
    "lowest</w>",
    "newer</w>",
    "wider</w>",
    "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    # Tokenizer must be built while the vocab/merges files still exist.
    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 41 | 1 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 197 | from __future__ import annotations
import numpy as np
def lowerCAmelCase( __lowerCamelCase ):
return np.maximum(0 , __lowerCamelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 197 | 1 |
"""simple docstring"""
UpperCamelCase_ = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
UpperCamelCase_ = [{'type': 'code', 'content': INSTALL_CONTENT}]
UpperCamelCase_ = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
} | 243 |
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
# Only show warnings and above from the transformers logger during conversion.
logging.set_verbosity_warning()

# presumably the JSON indent width used when dumping vocab/config files below — TODO confirm
UpperCamelCase_ = 2
class Dictionary:
    """A fairseq-style symbol dictionary mapping tokens <-> consecutive integers.

    NOTE(review): restored from an obfuscated dump in which all four public
    methods shared one name (so only the last survived) and ``add_from_file``
    recursed on the *path* instead of the opened file handle (infinite
    recursion).
    """

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        # specials are always the first four indices
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        # out-of-range indices map to the unknown word
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Build a Dictionary from a ``<symbol> <count>`` text file (path or handle)."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add *word* to the dictionary (or bump its count) and return its index."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        # no metadata header in the plain-text format; data starts at line 0
        return 0

    def add_from_file(self, f):
        """Load ``<symbol> <count> [#fairseq:overwrite]`` entries from *f*."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    # BUGFIX: recurse on the open handle, not the path again
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word))
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")


# keep the obfuscated dump's original class name resolvable
snake_case = Dictionary
def rewrite_dict_keys(d):
    """Rewrite fairseq BPE vocab keys for the HF tokenizer convention.

    (1) Continuation pieces lose their trailing ``@@``; (2) word-final pieces
    gain a ``</w>`` suffix; (3) special tokens keep their exact names.
    Returns a new dict; *d* is not modified.
    """
    d2 = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v)
        for k, v in d.items()
    )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq BioGPT checkpoint dir into a HF BioGPT model dir.

    NOTE(review): reconstructed from an obfuscated dump (every local had been
    renamed to ``a_``); structure follows the upstream transformers conversion
    script — confirm against it before relying on exact key renames.

    Args:
        biogpt_checkpoint_path: dir containing ``checkpoint.pt``, ``dict.txt``
            and ``bpecodes``.
        pytorch_dump_folder_path: output dir (created if missing).

    Raises:
        ValueError: if any of the expected input files is missing.
    """
    json_indent = UpperCamelCase_  # module-level JSON indent constant (2)

    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1E-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1_024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            # fairseq prefixes everything with "decoder"; HF uses "biogpt"
            # (rename reconstructed from the upstream script — confirm)
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
UpperCamelCase_ = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path) | 243 | 1 |
class Node:
    """A (name, value) pair ordered by value, used as a min-heap element."""

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        # heap ordering compares values only
        return self.val < other.val


# keep the obfuscated dump's class name resolvable
_snake_case = Node
class MinHeap:
    """Array-backed min-heap of Node-like objects (items expose .name/.val and __lt__).

    Also keeps ``idx_of_element`` (item -> heap index) so ``decrease_key`` is
    O(log n), and ``heap_dict`` (item.name -> item.val) for value lookup via
    ``heap[name]``.

    NOTE(review): restored from an obfuscated dump in which every method
    shared the name ``lowerCamelCase__`` (so only the last survived) and
    parameters were duplicated (a SyntaxError).
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        """Heapify *array* in place (bottom-up) and return it."""
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, item in enumerate(array):
            self.idx_of_element[item] = idx
            self.heap_dict[item.name] = item.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        """Move array[idx] down until the min-heap property holds."""
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                self.idx_of_element[array[idx]], self.idx_of_element[array[smallest]] = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        """Move heap[idx] up until the min-heap property holds."""
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        """Pop and return the minimum element."""
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


# keep the obfuscated dump's class name resolvable
_snake_case = MinHeap
# Demo: build a min-heap of Nodes and exercise decrease_key.
# (Original obfuscated dump assigned every node to the single name `__A`,
# so the `Node(...)`/`MinHeap(...)` calls below referenced undefined names.)
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
# Make the fast pipeline tests below reproducible across runs/devices.
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU, tiny-model) tests for DiTPipeline.

    NOTE(review): restored from an obfuscated dump — the base class `a__` was
    undefined (should be the imported PipelineTesterMixin) and all class
    attributes/methods shared one name, shadowing each other.
    """

    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    # original flag name was lost in obfuscation (`snake_case__ = False`);
    # presumably the mixin's cpu-offload switch — TODO confirm
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build tiny transformer/vae/scheduler components with a fixed seed."""
        torch.manual_seed(0)
        transformer = TransformeraDModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        # mps does not support device-bound generators
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1E-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1E-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the released facebook/DiT checkpoints.

    NOTE(review): restored from an obfuscated dump — the class name collided
    with the fast-test class above (shadowing it) and `tearDown` had been
    renamed so it never ran; a dataset separator fused into the last assert
    had also broken the tolerance expression.
    """

    def tearDown(self):
        # free GPU memory between tests
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy")
            assert np.abs((expected_image - image).max()) < 1E-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        # swap in the faster multistep scheduler (the obfuscated original
        # dropped the assignment back onto pipe.scheduler)
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy")
            assert np.abs((expected_image - image).max()) < 1E-1
"""simple docstring"""
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    """Shared assertions for the fixture SQL table: 4 rows x 3 known columns."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    # 4th fixture name reconstructed from upstream datasets conftest — confirm
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    # in-memory reads must grow arrow memory; on-disk reads must not
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    # 4th fixture name reconstructed from upstream datasets conftest — confirm
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    """Yield every row of the ``dataset`` table in the SQLite db at *sqlite_path*."""
    # local import: the module-level `import sqlitea` is an obfuscation-mangled
    # sqlite3 and would fail at import time
    import sqlite3

    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Round-trip: read fixture db, write with num_proc=1, compare rows."""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for rowa, rowa_written in zip(original_sql, expected_sql):
        assert rowa == rowa_written
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """Same round-trip as test_dataset_to_sql but written with num_proc=2."""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for rowa, rowa_written in zip(original_sql, expected_sql):
        assert rowa == rowa_written
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """num_proc=0 is invalid and must raise (presumably ValueError — matches upstream)."""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| 108 |
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
# Module logger + the --lr_scheduler name -> schedule-factory map used by the
# trainer below. (In the obfuscated dump both were bound to one name, so the
# dict clobbered the logger and neither `logger` nor `arg_to_scheduler`
# existed.)
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    'linear': get_linear_schedule_with_warmup,
    'cosine': get_cosine_schedule_with_warmup,
    'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
    'polynomial': get_polynomial_decay_schedule_with_warmup,
    'constant': get_constant_schedule,
    'constant_w_warmup': get_constant_schedule_with_warmup,
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
def __init__( self , snake_case__=None , snake_case__=None , *snake_case__ , **snake_case__ ):
"""simple docstring"""
super().__init__(*snake_case__ , **snake_case__ )
if config is None:
assert isinstance(self.model , snake_case__ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f""" {self.model.__class__}"""
)
lowerCAmelCase : Optional[int] = self.model.config
else:
lowerCAmelCase : List[str] = config
lowerCAmelCase : Any = data_args
lowerCAmelCase : Tuple = self.config.tgt_vocab_size if isinstance(self.config , snake_case__ ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
" padding.." )
if self.args.label_smoothing == 0:
lowerCAmelCase : int = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
lowerCAmelCase : Tuple = label_smoothed_nll_loss
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if self.optimizer is None:
lowerCAmelCase : Optional[int] = ["bias", "LayerNorm.weight"]
lowerCAmelCase : str = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
lowerCAmelCase : Union[str, Any] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
lowerCAmelCase : Dict = Adafactor
lowerCAmelCase : Optional[int] = {"scale_parameter": False, "relative_step": False}
else:
lowerCAmelCase : int = AdamW
lowerCAmelCase : int = {
"betas": (self.args.adam_betaa, self.args.adam_betaa),
"eps": self.args.adam_epsilon,
}
lowerCAmelCase : Any = self.args.learning_rate
if self.sharded_ddp:
lowerCAmelCase : int = OSS(
params=snake_case__ , optim=snake_case__ , **snake_case__ , )
else:
lowerCAmelCase : Any = optimizer_cls(snake_case__ , **snake_case__ )
if self.lr_scheduler is None:
lowerCAmelCase : Tuple = self._get_lr_scheduler(snake_case__ )
else: # ignoring --lr_scheduler
logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
lowerCAmelCase : Tuple = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
lowerCAmelCase : Any = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
lowerCAmelCase : str = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=snake_case__ )
return scheduler
def lowercase__ ( self ):
"""simple docstring"""
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
lowerCAmelCase : Dict = model(**snake_case__ , use_cache=snake_case__ )[0]
lowerCAmelCase : List[Any] = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
lowerCAmelCase , lowerCAmelCase : str = model(**snake_case__ , labels=snake_case__ , use_cache=snake_case__ )[:2]
else:
# compute label smoothed loss
lowerCAmelCase : int = model(**snake_case__ , use_cache=snake_case__ )[0]
lowerCAmelCase : List[Any] = torch.nn.functional.log_softmax(snake_case__ , dim=-1 )
lowerCAmelCase , lowerCAmelCase : str = self.loss_fn(snake_case__ , snake_case__ , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = inputs.pop("labels" )
lowerCAmelCase , lowerCAmelCase : str = self._compute_loss(snake_case__ , snake_case__ , snake_case__ )
return loss
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , ):
"""simple docstring"""
lowerCAmelCase : List[str] = self._prepare_inputs(snake_case__ )
lowerCAmelCase : Union[str, Any] = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
lowerCAmelCase : Dict = self.model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **snake_case__ , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
lowerCAmelCase : Dict = self._pad_tensors_to_max_len(snake_case__ , gen_kwargs["max_length"] )
lowerCAmelCase : Optional[Any] = inputs.pop("labels" )
with torch.no_grad():
# compute loss on predict data
lowerCAmelCase , lowerCAmelCase : Dict = self._compute_loss(snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase : List[str] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
lowerCAmelCase : int = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
lowerCAmelCase : Optional[int] = self._pad_tensors_to_max_len(snake_case__ , gen_kwargs["max_length"] )
return (loss, logits, labels)
    def lowercase__ ( self , snake_case__ , snake_case__ ):
        """Right-pad a 2-D token tensor with the pad token up to ``max_length``.

        Raises ``ValueError`` when neither ``config.pad_token_id`` nor
        ``config.eos_token_id`` is set.

        NOTE(review): both parameters are named ``snake_case__``
        (SyntaxError) and the body uses the pre-obfuscation names
        (``tensor``/``max_length``/``pad_token_id``/``padded_tensor``),
        none of which are bound here; the copy of ``tensor`` into the
        padded buffer was also reduced to a throwaway assignment.
        """
        lowerCAmelCase : List[Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f""" padded to `max_length`={max_length}""" )
        # Start from a full pad-token tensor, then copy the real values in.
        lowerCAmelCase : Optional[Any] = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        lowerCAmelCase : int = tensor
        return padded_tensor
| 108 | 1 |
"""simple docstring"""
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
# Module-level logger, namespaced to this module.
_A = logging.get_logger(__name__)
class lowerCamelCase :
    """Module-wide configuration holder.

    Exposes a single class attribute, ``SCREAMING_SNAKE_CASE``, which
    defaults to ``None``; no instances are required to use it.
    """

    SCREAMING_SNAKE_CASE = None
@experimental
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Dict:
    """Dispatch a parallel map: use joblib when a backend name has been
    registered, otherwise fall back to a multiprocessing pool.

    NOTE(review): all seven parameters share the name ``lowerCAmelCase``
    (SyntaxError), and ``ParallelBackendConfig`` plus the two ``_map_with_*``
    helpers were renamed away by the obfuscation, so none of the referenced
    names resolve in this file as written.
    """
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            __a , __a , __a , __a , __a , __a , __a )
    return _map_with_joblib(__a , __a , __a , __a , __a , __a , __a )
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Tuple:
    """Map a function over an iterable with a multiprocessing ``Pool``,
    splitting the input into ``num_proc`` contiguous slices.

    NOTE(review): duplicate ``lowerCAmelCase`` parameters (SyntaxError);
    the body uses pre-obfuscation local names (``num_proc``, ``split_kwds``,
    ``div``/``mod``/``start``/``end``, ``mapped``, the pool init args) that
    were destroyed and are never bound here.
    """
    UpperCAmelCase__ : Optional[Any] = num_proc if num_proc <= len(__a ) else len(__a )
    UpperCAmelCase__ : Tuple = [] # We organize the splits ourselve (contiguous splits)
    for index in range(__a ):
        UpperCAmelCase__ : List[str] = len(__a ) // num_proc
        UpperCAmelCase__ : int = len(__a ) % num_proc
        UpperCAmelCase__ : List[str] = div * index + min(__a , __a )
        UpperCAmelCase__ : str = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
    # Sanity check: the slices must cover the whole iterable exactly once.
    if len(__a ) != sum(len(i[1] ) for i in split_kwds ):
        raise ValueError(
            F"""Error dividing inputs iterable among processes. """
            F"""Total number of objects {len(__a )}, """
            F"""length: {sum(len(i[1] ) for i in split_kwds )}""" )
    logger.info(
        F"""Spawning {num_proc} processes for {len(__a )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" )
    UpperCAmelCase__ : Optional[int] = None, None
    if not disable_tqdm:
        # Share tqdm's lock with workers so progress bars do not clobber
        # each other across processes.
        UpperCAmelCase__ : Optional[Any] = (RLock(),), tqdm.set_lock
    with Pool(__a , initargs=__a , initializer=__a ) as pool:
        UpperCAmelCase__ : Dict = pool.map(__a , __a )
    logger.info(F"""Finished {num_proc} processes""" )
    # Flatten the per-process result lists back into a single list.
    UpperCAmelCase__ : Any = [obj for proc_res in mapped for obj in proc_res]
    logger.info(F"""Unpacked {len(__a )} objects""" )
    return mapped
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> int:
    """Map a function over an iterable through joblib's parallel backend.

    NOTE(review): duplicate ``lowerCAmelCase`` parameters (SyntaxError), and
    ``ParallelBackendConfig``/``function``/``types``/``iterable`` do not
    resolve under these names after obfuscation.
    """
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib
    with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=__a ):
        return joblib.Parallel()(
            joblib.delayed(__a )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def a__ ( lowerCAmelCase ) -> Optional[Any]:
    """Context manager selecting the joblib backend used by the parallel
    map helpers for the duration of the ``with`` block.

    NOTE(review): the body reads ``backend_name`` (the pre-obfuscation
    parameter name), and the assignments that originally set and reset the
    shared backend-name attribute were turned into throwaway local
    bindings, so the intended side effect is lost as written.
    """
    UpperCAmelCase__ : Any = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark
        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        # Reset on exit (original restored the module-level backend name).
        UpperCAmelCase__ : Tuple = None
| 350 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase ( lowerCAmelCase__ ):
    """Unit tests for ``DDIMParallelScheduler`` built on the shared
    ``SchedulerCommonTest`` harness.

    NOTE(review): the obfuscation erased most local variable names
    (``config``, ``scheduler_class``, ``scheduler``, ``model``, ``sample``,
    ``residual``, ``result_sum``/``result_mean`` ... all collapsed into
    ``UpperCAmelCase__``), the base-class name (``lowerCAmelCase__`` is
    unbound at class-creation time), and the boolean arguments to
    ``full_loop``/``check_over_configs`` (``_lowerCamelCase``).  The class
    cannot run as written; structure preserved for reference.
    """
    # NOTE(review): both class attributes were renamed to the same name, so
    # the second (the ``forward_default_kwargs`` pairs) clobbers the first
    # (the ``scheduler_classes`` tuple).
    SCREAMING_SNAKE_CASE = (DDIMParallelScheduler,)
    SCREAMING_SNAKE_CASE = (('eta', 0.0), ('num_inference_steps', 5_0))
    def _a (self , **_lowerCamelCase ):
        """Build a default scheduler config dict, overridden by kwargs."""
        UpperCAmelCase__ : Optional[Any] = {
            """num_train_timesteps""": 1000,
            """beta_start""": 0.0_001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """clip_sample""": True,
        }
        config.update(**_lowerCamelCase )
        return config
    def _a (self , **_lowerCamelCase ):
        """Run a full 10-step denoising loop and return the final sample."""
        UpperCAmelCase__ : Any = self.scheduler_classes[0]
        UpperCAmelCase__ : List[str] = self.get_scheduler_config(**_lowerCamelCase )
        UpperCAmelCase__ : Tuple = scheduler_class(**_lowerCamelCase )
        UpperCAmelCase__ , UpperCAmelCase__ : str = 10, 0.0
        UpperCAmelCase__ : Tuple = self.dummy_model()
        UpperCAmelCase__ : Optional[Any] = self.dummy_sample_deter
        scheduler.set_timesteps(_lowerCamelCase )
        for t in scheduler.timesteps:
            UpperCAmelCase__ : Optional[Any] = model(_lowerCamelCase , _lowerCamelCase )
            UpperCAmelCase__ : str = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ).prev_sample
        return sample
    def _a (self ):
        """Scheduler accepts a range of ``num_train_timesteps`` values."""
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=_lowerCamelCase )
    def _a (self ):
        """``steps_offset`` shifts the timestep grid (checked for offset=1)."""
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=_lowerCamelCase )
        UpperCAmelCase__ : str = self.scheduler_classes[0]
        UpperCAmelCase__ : Optional[Any] = self.get_scheduler_config(steps_offset=1 )
        UpperCAmelCase__ : Tuple = scheduler_class(**_lowerCamelCase )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
    def _a (self ):
        """Config accepts several ``beta_start``/``beta_end`` pairs."""
        for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase )
    def _a (self ):
        """Both supported beta schedules are accepted."""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=_lowerCamelCase )
    def _a (self ):
        """Both prediction types are accepted."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=_lowerCamelCase )
    def _a (self ):
        """``clip_sample`` round-trips through the config."""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=_lowerCamelCase )
    def _a (self ):
        """Both timestep-spacing modes are accepted."""
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=_lowerCamelCase )
    def _a (self ):
        """``rescale_betas_zero_snr`` round-trips through the config."""
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=_lowerCamelCase )
    def _a (self ):
        """Thresholding combines with thresholds and prediction types."""
        self.check_over_configs(thresholding=_lowerCamelCase )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=_lowerCamelCase , prediction_type=_lowerCamelCase , sample_max_value=_lowerCamelCase , )
    def _a (self ):
        """Forward pass checked at selected timesteps."""
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=_lowerCamelCase )
    def _a (self ):
        """Forward pass checked at timestep / num_inference_steps pairs."""
        for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
            self.check_over_forward(time_step=_lowerCamelCase , num_inference_steps=_lowerCamelCase )
    def _a (self ):
        """Forward pass checked at timestep / eta pairs."""
        for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=_lowerCamelCase , eta=_lowerCamelCase )
    def _a (self ):
        """Spot-check ``_get_variance`` at known (t, prev_t) pairs."""
        UpperCAmelCase__ : List[str] = self.scheduler_classes[0]
        UpperCAmelCase__ : Dict = self.get_scheduler_config()
        UpperCAmelCase__ : Any = scheduler_class(**_lowerCamelCase )
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14_771 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32_460 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00_979 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5
    def _a (self ):
        """``batch_step_no_noise`` over a stacked batch of three samples
        matches the reference sum/mean."""
        UpperCAmelCase__ : str = self.scheduler_classes[0]
        UpperCAmelCase__ : str = self.get_scheduler_config()
        UpperCAmelCase__ : int = scheduler_class(**_lowerCamelCase )
        UpperCAmelCase__ , UpperCAmelCase__ : List[str] = 10, 0.0
        scheduler.set_timesteps(_lowerCamelCase )
        UpperCAmelCase__ : Tuple = self.dummy_model()
        UpperCAmelCase__ : Optional[Any] = self.dummy_sample_deter
        UpperCAmelCase__ : str = self.dummy_sample_deter + 0.1
        UpperCAmelCase__ : Any = self.dummy_sample_deter - 0.1
        UpperCAmelCase__ : Tuple = samplea.shape[0]
        UpperCAmelCase__ : Dict = torch.stack([samplea, samplea, samplea] , dim=0 )
        UpperCAmelCase__ : int = torch.arange(_lowerCamelCase )[0:3, None].repeat(1 , _lowerCamelCase )
        UpperCAmelCase__ : Union[str, Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        UpperCAmelCase__ : int = scheduler.batch_step_no_noise(_lowerCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , _lowerCamelCase )
        UpperCAmelCase__ : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
        UpperCAmelCase__ : List[str] = torch.mean(torch.abs(_lowerCamelCase ) )
        assert abs(result_sum.item() - 1_147.7_904 ) < 1e-2
        assert abs(result_mean.item() - 0.4_982 ) < 1e-3
    def _a (self ):
        """End-to-end loop with the default config matches reference sums."""
        UpperCAmelCase__ : Optional[Any] = self.full_loop()
        UpperCAmelCase__ : Optional[int] = torch.sum(torch.abs(_lowerCamelCase ) )
        UpperCAmelCase__ : int = torch.mean(torch.abs(_lowerCamelCase ) )
        assert abs(result_sum.item() - 172.0_067 ) < 1e-2
        assert abs(result_mean.item() - 0.223_967 ) < 1e-3
    def _a (self ):
        """End-to-end loop with ``v_prediction`` matches reference sums."""
        UpperCAmelCase__ : Dict = self.full_loop(prediction_type="""v_prediction""" )
        UpperCAmelCase__ : Optional[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
        UpperCAmelCase__ : List[str] = torch.mean(torch.abs(_lowerCamelCase ) )
        assert abs(result_sum.item() - 52.5_302 ) < 1e-2
        assert abs(result_mean.item() - 0.0_684 ) < 1e-3
    def _a (self ):
        """Loop with ``set_alpha_to_one`` + ``beta_start=0.01`` (flag value
        lost to obfuscation) matches reference sums."""
        UpperCAmelCase__ : Union[str, Any] = self.full_loop(set_alpha_to_one=_lowerCamelCase , beta_start=0.01 )
        UpperCAmelCase__ : List[str] = torch.sum(torch.abs(_lowerCamelCase ) )
        UpperCAmelCase__ : Optional[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
        assert abs(result_sum.item() - 149.8_295 ) < 1e-2
        assert abs(result_mean.item() - 0.1_951 ) < 1e-3
    def _a (self ):
        """Counterpart loop (presumably ``set_alpha_to_one=False`` before
        obfuscation) matches reference sums."""
        UpperCAmelCase__ : Tuple = self.full_loop(set_alpha_to_one=_lowerCamelCase , beta_start=0.01 )
        UpperCAmelCase__ : Optional[int] = torch.sum(torch.abs(_lowerCamelCase ) )
        UpperCAmelCase__ : Optional[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
        assert abs(result_sum.item() - 149.0_784 ) < 1e-2
        assert abs(result_mean.item() - 0.1_941 ) < 1e-3
| 166 | 0 |
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4)) | 8 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import table: submodule name -> public symbols it defines.
# NOTE(review): in the original dump every ``_import_structure`` binding had
# been renamed to ``lowerCAmelCase_``, so each optional-dependency branch
# clobbered the previous value and the final ``_LazyModule(...)`` call
# referenced an undefined ``_import_structure`` (and carried stray
# ``| 8 | 1 |`` table residue).  Restored to the canonical lazy-init pattern;
# the public module attribute ``lowerCAmelCase_`` is kept for callers.
_import_structure = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch models are only exposed when torch itself is installed.
    _import_structure['''modeling_opt'''] = [
        '''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''OPTForCausalLM''',
        '''OPTModel''',
        '''OPTPreTrainedModel''',
        '''OPTForSequenceClassification''',
        '''OPTForQuestionAnswering''',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_opt'''] = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_flax_opt'''] = [
        '''FlaxOPTForCausalLM''',
        '''FlaxOPTModel''',
        '''FlaxOPTPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is
    # replaced by a ``_LazyModule`` that resolves attributes on first access.
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
    import sys

    # Swap this module for a lazy proxy so heavy backends load on demand.
    lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
# NOTE(review): all four helpers below were renamed to the same identifier
# ``__lowerCamelCase`` by the obfuscation, so each redefinition shadows the
# previous one and only the last binding would survive import.  Each also
# repeats the parameter name ``lowerCamelCase__`` (a SyntaxError) while the
# bodies use the pre-obfuscation names (``moles``/``volume``/``temperature``/
# ``pressure``/``nfactor``).  0.0821 matches the ideal-gas constant R in
# L*atm/(mol*K), so these presumably implement PV = nRT rearrangements --
# confirm against the upstream source.
def __lowerCamelCase ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[Any] ):
    """Presumably dilution: concentration times a multiplying factor."""
    return round(float(moles / volume ) * nfactor )
def __lowerCamelCase ( lowerCamelCase__ : Dict , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any ):
    """Presumably pressure from moles, temperature and volume (PV = nRT)."""
    return round(float((moles * 0.0_8_2_1 * temperature) / (volume) ) )
def __lowerCamelCase ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Dict ):
    """Presumably volume from moles, temperature and pressure (PV = nRT)."""
    return round(float((moles * 0.0_8_2_1 * temperature) / (pressure) ) )
def __lowerCamelCase ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any] ):
    """Presumably temperature from pressure, volume and moles (PV = nRT)."""
    return round(float((pressure * volume) / (0.0_8_2_1 * moles) ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
import math
import tensorflow as tf
from packaging import version
# NOTE(review): the seven TF activation helpers below were all renamed to
# ``__lowerCamelCase`` (each shadows the previous) and their local bindings
# (``x``, ``cdf``, ``pi``, ``coeff``, ``a``/``b`` from the split) were
# collapsed into ``lowerCamelCase``, so every body references unbound names.
def __lowerCamelCase ( lowerCamelCase__ : Optional[Any] ):
    """erf-based (exact) GELU activation."""
    lowerCamelCase = tf.convert_to_tensor(lowerCamelCase__ )
    lowerCamelCase = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def __lowerCamelCase ( lowerCamelCase__ : Dict ):
    """tanh-approximated GELU (the "gelu_new" variant)."""
    lowerCamelCase = tf.convert_to_tensor(lowerCamelCase__ )
    lowerCamelCase = tf.cast(math.pi , x.dtype )
    lowerCamelCase = tf.cast(0.0_4_4_7_1_5 , x.dtype )
    lowerCamelCase = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(lowerCamelCase__ , 3 )) ))
    return x * cdf
def __lowerCamelCase ( lowerCamelCase__ : Any ):
    """Mish activation: x * tanh(softplus(x))."""
    lowerCamelCase = tf.convert_to_tensor(lowerCamelCase__ )
    return x * tf.tanh(tf.math.softplus(lowerCamelCase__ ) )
def __lowerCamelCase ( lowerCamelCase__ : List[Any] ):
    """Fast tanh-based GELU approximation."""
    lowerCamelCase = tf.convert_to_tensor(lowerCamelCase__ )
    lowerCamelCase = tf.cast(0.0_4_4_7_1_5 , x.dtype )
    lowerCamelCase = tf.cast(0.7_9_7_8_8_4_5_6_0_8 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def __lowerCamelCase ( lowerCamelCase__ : str ):
    """Sigmoid-based "quick" GELU approximation: x * sigmoid(1.702 x)."""
    lowerCamelCase = tf.convert_to_tensor(lowerCamelCase__ )
    lowerCamelCase = tf.cast(1.7_0_2 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def __lowerCamelCase ( lowerCamelCase__ : List[str] ):
    """GELU clipped to [-10, 10] ("gelu_10"); relies on a ``_gelu`` name
    that no longer exists after obfuscation."""
    return tf.clip_by_value(_gelu(lowerCamelCase__ ) , -10 , 10 )
def __lowerCamelCase ( lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int]=-1 ):
    """Gated linear unit: split in two along ``axis`` and gate with sigmoid."""
    lowerCamelCase , lowerCamelCase = tf.split(lowerCamelCase__ , 2 , axis=lowerCamelCase__ )
    return a * tf.math.sigmoid(lowerCamelCase__ )
# Pick Keras' native GELU on TF >= 2.4, otherwise fall back to the manual
# implementations above.
# NOTE(review): the bindings (originally ``gelu``/``gelu_new``/
# ``approximate_gelu_wrap``/``ACT2FN``) were renamed to ``UpperCAmelCase``,
# so each assignment clobbers the previous one and none of the names used
# inside the mapping (``gelu``, ``gelu_aa``, ``gelu_fast``, ...) resolve.
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
    def __lowerCamelCase ( lowerCamelCase__ : List[str] ):
        """Wrapper presumably forwarding to Keras' approximate GELU."""
        return tf.keras.activations.gelu(lowerCamelCase__ , approximate=lowerCamelCase__ )
    UpperCAmelCase : Union[str, Any] = tf.keras.activations.gelu
    UpperCAmelCase : Optional[Any] = approximate_gelu_wrap
else:
    UpperCAmelCase : List[Any] = _gelu
    UpperCAmelCase : str = _gelu_new
# Name -> activation-callable mapping (originally ``ACT2FN``).
UpperCAmelCase : Union[str, Any] = {
    "gelu": gelu,
    "gelu_10": gelu_aa,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def __lowerCamelCase ( lowerCamelCase__ : List[Any] ):
    """Resolve an activation name through the module-level ``ACTaFN`` table.

    Raises ``KeyError`` listing the known names when the lookup fails.
    NOTE(review): the body references ``activation_string`` rather than its
    own (obfuscated) parameter -- behaviour preserved from the original.
    """
    if activation_string not in ACTaFN:
        raise KeyError(f'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
    return ACTaFN[activation_string]
| 66 | 0 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
# NOTE(review): the five module globals below were all renamed to
# ``snake_case_`` by the obfuscation, so each assignment clobbers the
# previous one; originally they were the logger, ``_CITATION``,
# ``_DESCRIPTION``, ``_KWARGS_DESCRIPTION`` and ``CHECKPOINT_URLS``.
snake_case_ : Optional[int] = datasets.logging.get_logger(__name__)
snake_case_ : Tuple = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
snake_case_ : int = "\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"
snake_case_ : str = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"
# Downloadable BLEURT checkpoint archives, keyed by configuration name.
snake_case_ : Tuple = {
    "bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
    "bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
    "bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
    "bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
    "bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
    "bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
    "BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
    "BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
    "BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
    "BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
    """``datasets`` Metric wrapper around Google's BLEURT scorer.

    NOTE(review): the obfuscation erased the local names
    (``checkpoint_name``, ``model_path``, ``scores``) and the assignment
    that originally set ``self.scorer``, so ``_compute`` references an
    attribute that is never bound here; the decorator also relies on
    ``_DESCRIPTION``/``_KWARGS_DESCRIPTION`` globals that were renamed away.
    """
    def lowerCamelCase ( self : str):
        """Declare metric metadata: string predictions and references."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence'''),
                    '''references''': datasets.Value('''string''' , id='''sequence'''),
                }) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
    def lowerCamelCase ( self : Optional[Any] , _snake_case : Dict):
        """Resolve the checkpoint for ``self.config_name``, download it and
        build the BLEURT scorer."""
        if self.config_name == "default":
            logger.warning(
                '''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
                '''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''')
            UpperCAmelCase_ = '''bleurt-base-128'''
        # Checkpoint names are matched case-insensitively against the table.
        if self.config_name.lower() in CHECKPOINT_URLS:
            UpperCAmelCase_ = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            UpperCAmelCase_ = self.config_name.upper()
        else:
            raise KeyError(
                F"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""")
        # download the model checkpoint specified by self.config_name and set up the scorer
        UpperCAmelCase_ = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        UpperCAmelCase_ = score.BleurtScorer(os.path.join(_snake_case , _snake_case))
    def lowerCamelCase ( self : Any , _snake_case : int , _snake_case : List[Any]):
        """Score candidate sentences against references with BLEURT."""
        UpperCAmelCase_ = self.scorer.score(references=_snake_case , candidates=_snake_case)
        return {"scores": scores}
# Pinned dependency table: canonical pip requirement string for each package
# name (the transformers ``deps`` table consumed by setup / runtime
# dependency-version checks).
__UpperCAmelCase = {
    'Pillow': 'Pillow<10.0.0',
    'accelerate': 'accelerate>=0.20.3',
    'av': 'av==9.2.0',
    'beautifulsoup4': 'beautifulsoup4',
    'black': 'black~=23.1',
    'codecarbon': 'codecarbon==1.2.0',
    'cookiecutter': 'cookiecutter==1.7.3',
    'dataclasses': 'dataclasses',
    'datasets': 'datasets!=2.5.0',
    'decord': 'decord==0.6.0',
    'deepspeed': 'deepspeed>=0.9.3',
    'diffusers': 'diffusers',
    'dill': 'dill<0.3.5',
    'evaluate': 'evaluate>=0.2.0',
    'fairscale': 'fairscale>0.3',
    'faiss-cpu': 'faiss-cpu',
    'fastapi': 'fastapi',
    'filelock': 'filelock',
    'flax': 'flax>=0.4.1,<=0.7.0',
    'ftfy': 'ftfy',
    'fugashi': 'fugashi>=1.0',
    'GitPython': 'GitPython<3.1.19',
    'hf-doc-builder': 'hf-doc-builder>=0.3.0',
    'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
    'importlib_metadata': 'importlib_metadata',
    'ipadic': 'ipadic>=1.0.0,<2.0',
    'isort': 'isort>=5.5.4',
    'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
    'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
    'jieba': 'jieba',
    'kenlm': 'kenlm',
    'keras-nlp': 'keras-nlp>=0.3.1',
    'librosa': 'librosa',
    'nltk': 'nltk',
    'natten': 'natten>=0.14.6',
    'numpy': 'numpy>=1.17',
    'onnxconverter-common': 'onnxconverter-common',
    'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
    'onnxruntime': 'onnxruntime>=1.4.0',
    'opencv-python': 'opencv-python',
    'optuna': 'optuna',
    'optax': 'optax>=0.0.8,<=0.1.4',
    'packaging': 'packaging>=20.0',
    'parameterized': 'parameterized',
    'phonemizer': 'phonemizer',
    'protobuf': 'protobuf',
    'psutil': 'psutil',
    'pyyaml': 'pyyaml>=5.1',
    'pydantic': 'pydantic<2',
    'pytest': 'pytest>=7.2.0',
    'pytest-timeout': 'pytest-timeout',
    'pytest-xdist': 'pytest-xdist',
    'python': 'python>=3.8.0',
    'ray[tune]': 'ray[tune]',
    'regex': 'regex!=2019.12.17',
    'requests': 'requests',
    'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
    'rjieba': 'rjieba',
    'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
    'ruff': 'ruff>=0.0.241,<=0.0.259',
    'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
    'sacremoses': 'sacremoses',
    'safetensors': 'safetensors>=0.3.1',
    'sagemaker': 'sagemaker>=2.31.0',
    'scikit-learn': 'scikit-learn',
    'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
    'sigopt': 'sigopt',
    'starlette': 'starlette',
    'sudachipy': 'sudachipy>=0.6.6',
    'sudachidict_core': 'sudachidict_core>=20220729',
    'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
    'tensorflow': 'tensorflow>=2.6,<2.14',
    'tensorflow-text': 'tensorflow-text<2.14',
    'tf2onnx': 'tf2onnx',
    'timeout-decorator': 'timeout-decorator',
    'timm': 'timm',
    'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
    'torch': 'torch>=1.9,!=1.12.0',
    'torchaudio': 'torchaudio',
    'torchvision': 'torchvision',
    'pyctcdecode': 'pyctcdecode>=0.4.0',
    'tqdm': 'tqdm>=4.27',
    'unidic': 'unidic>=1.0.2',
    'unidic_lite': 'unidic_lite>=1.0.7',
    'urllib3': 'urllib3<2.0.0',
    'uvicorn': 'uvicorn',
}
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
# Randomized User-Agent header used for every Instagram request below.
A : str = {'UserAgent': UserAgent().random}
def __lowerCAmelCase ( a__ ) -> dict:
    """Extract the ``user`` dict embedded in an Instagram profile-page
    ``<script>`` tag (JSON after the ``{"config"`` marker).

    NOTE(review): the parameter is ``a__`` but the body reads ``script``,
    ``data`` and ``info`` (pre-obfuscation names), so this raises
    ``NameError`` as written.
    """
    __a = script.contents[0]
    __a = json.loads(data[data.find('''{"config"''' ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __A:
    """Scraper for public Instagram profile data (originally
    ``InstagramUser``).

    NOTE(review): the obfuscation renamed every property to
    ``SCREAMING_SNAKE_CASE_`` (so each definition shadows the previous one)
    and destroyed local/parameter bindings (``username``, ``html``,
    ``scripts``), so much of the class cannot run as written.
    """
    def __init__( self , _snake_case ) -> List[Any]:
        """Build the profile URL and eagerly fetch the profile JSON."""
        __a = F"""https://www.instagram.com/{username}/"""
        __a = self.get_json()
    def SCREAMING_SNAKE_CASE_ ( self ) -> dict:
        """Fetch the profile page and parse the user dict out of its
        script tags (falling back to an alternate script index)."""
        __a = requests.get(self.url , headers=_snake_case ).text
        __a = BeautifulSoup(_snake_case , '''html.parser''' ).find_all('''script''' )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )
    def __repr__( self ) -> str:
        """Debug representation: ClassName('username')."""
        return F"""{self.__class__.__name__}('{self.username}')"""
    def __str__( self ) -> str:
        """Human-readable summary: full name, username and biography."""
        return F"""{self.fullname} ({self.username}) is {self.biography}"""
    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> str:
        """The scraped ``username`` field."""
        return self.user_data["username"]
    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> str:
        """The scraped ``full_name`` field."""
        return self.user_data["full_name"]
    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> str:
        """The scraped ``biography`` field."""
        return self.user_data["biography"]
    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> str:
        """The scraped ``business_email`` field."""
        return self.user_data["business_email"]
    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> str:
        """The scraped ``external_url`` field."""
        return self.user_data["external_url"]
    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> int:
        """Follower count (``edge_followed_by``)."""
        return self.user_data["edge_followed_by"]["count"]
    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> int:
        """Following count (``edge_follow``)."""
        return self.user_data["edge_follow"]["count"]
    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> int:
        """Post count (``edge_owner_to_timeline_media``)."""
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> str:
        """HD profile-picture URL."""
        return self.user_data["profile_pic_url_hd"]
    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> bool:
        """Whether the account is verified."""
        return self.user_data["is_verified"]
    @property
    def SCREAMING_SNAKE_CASE_ ( self ) -> bool:
        """Whether the account is private."""
        return self.user_data["is_private"]
def __lowerCAmelCase ( a__ = "github" ) -> None:
    """Smoke-test the scraper against the live ``github`` profile
    (skipped entirely on CI).

    NOTE(review): the parameter is ``a__`` but the body asserts against
    ``username`` and ``instagram_user`` (pre-obfuscation names), and
    ``InstagramUser`` no longer exists under that name in this file.
    """
    import os
    if os.environ.get('''CI''' ):
        return # test failing on GitHub Actions
    __a = InstagramUser(a__ )
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data , a__ )
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 12_0000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Demo: scrape the "github" profile and print its public stats.
    # Bug fixed: the last print carried fused dataset-table residue
    # ("| 33 |"), which would have applied the ``|`` operator to ``print``'s
    # ``None`` return and raised TypeError.
    # NOTE(review): ``InstagramUser`` is the pre-obfuscation class name (the
    # class above is now ``__A``) and every ``A : ... =`` assignment rebinds
    # the same module global, so this demo still cannot run as written.
    A : List[Any] = InstagramUser('github')
    print(instagram_user)
    print(F"{instagram_user.number_of_posts = }")
    print(F"{instagram_user.number_of_followers = }")
    print(F"{instagram_user.number_of_followings = }")
    print(F"{instagram_user.email = }")
    print(F"{instagram_user.website = }")
    print(F"{instagram_user.profile_picture_url = }")
    print(F"{instagram_user.is_verified = }")
    print(F"{instagram_user.is_private = }")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import table: submodule name -> public symbols it defines.
# NOTE(review): in the original dump every ``_import_structure`` binding had
# been renamed to ``A``, so each optional-dependency branch clobbered the
# previous value and the final ``_LazyModule(...)`` call referenced an
# undefined ``_import_structure`` (and carried stray ``| 33 | 1 |`` table
# residue).  Restored to the canonical lazy-init pattern; the public module
# attribute ``A`` is kept for callers.
_import_structure = {
    'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
    'tokenization_roberta': ['RobertaTokenizer'],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_roberta_fast'] = ['RobertaTokenizerFast']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_roberta'] = [
        'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RobertaForCausalLM',
        'RobertaForMaskedLM',
        'RobertaForMultipleChoice',
        'RobertaForQuestionAnswering',
        'RobertaForSequenceClassification',
        'RobertaForTokenClassification',
        'RobertaModel',
        'RobertaPreTrainedModel',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_roberta'] = [
        'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFRobertaForCausalLM',
        'TFRobertaForMaskedLM',
        'TFRobertaForMultipleChoice',
        'TFRobertaForQuestionAnswering',
        'TFRobertaForSequenceClassification',
        'TFRobertaForTokenClassification',
        'TFRobertaMainLayer',
        'TFRobertaModel',
        'TFRobertaPreTrainedModel',
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_roberta'] = [
        'FlaxRobertaForCausalLM',
        'FlaxRobertaForMaskedLM',
        'FlaxRobertaForMultipleChoice',
        'FlaxRobertaForQuestionAnswering',
        'FlaxRobertaForSequenceClassification',
        'FlaxRobertaForTokenClassification',
        'FlaxRobertaModel',
        'FlaxRobertaPreTrainedModel',
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is
    # replaced by a ``_LazyModule`` that resolves attributes on first access.
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )
else:
    import sys

    # Swap this module for a lazy proxy so heavy backends load on demand.
    A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def lowerCamelCase_ ( UpperCamelCase__ : Any ) -> int:
    """Sort a list in place via stooge sort and return it.

    NOTE(review): this wrapper and the recursive worker below were both
    renamed to ``lowerCamelCase_``, so this first definition is shadowed,
    the call to ``stooge`` (the worker's pre-obfuscation name) does not
    resolve, and ``return arr`` reads a name that is never bound here.
    """
    stooge(UpperCamelCase__ , 0 , len(UpperCamelCase__ ) - 1 )
    return arr
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : str ) -> Any:
    """Recursive stooge-sort worker over ``arr[i:h+1]``.

    NOTE(review): the three parameters share the name ``UpperCamelCase__``
    (SyntaxError) while the body uses the pre-obfuscation names
    (``arr``/``i``/``h``/``t``) and recurses through ``stooge``, none of
    which are bound in this file as written.
    """
    if i >= h:
        return
    # If first element is smaller than the last then swap them
    if arr[i] > arr[h]:
        __lowerCamelCase , __lowerCamelCase = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        __lowerCamelCase = (int)((h - i + 1) / 3 )
        # Recursively sort first 2/3 elements
        stooge(UpperCamelCase__ , UpperCamelCase__ , (h - t) )
        # Recursively sort last 2/3 elements
        stooge(UpperCamelCase__ , i + t , (UpperCamelCase__) )
        # Recursively sort first 2/3 elements
        stooge(UpperCamelCase__ , UpperCamelCase__ , (h - t) )
if __name__ == "__main__":
    # NOTE(review): ``user_input``/``unsorted``/``stooge_sort`` are the
    # pre-obfuscation names; both assignments above it target ``__A`` and
    # both sort functions are now called ``lowerCamelCase_``, so this demo
    # cannot run as written.
    __A = input("Enter numbers separated by a comma:\n").strip()
    __A = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
| 90 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module logger.
lowerCAmelCase__ = logging.get_logger(__name__)
# NOTE(review): this second assignment reuses the same obfuscated name and
# clobbers the logger; it was presumably the pretrained-config archive map.
lowerCAmelCase__ = {
    '''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ ):
    """Configuration class for ConvNeXT V2 (``model_type='convnextv2'``).

    NOTE(review): the ``__init__`` signature repeats the parameter name
    ``lowercase__`` (a SyntaxError), the two base classes are the same
    unbound placeholder, and the body reads the pre-obfuscation names
    (``num_channels``, ``patch_size``, ``hidden_sizes``, ``depths``, ...);
    restore from the upstream source before use.
    """
    # Model-type identifier used by the Auto* config machinery.
    SCREAMING_SNAKE_CASE : List[str] = 'convnextv2'
    def __init__( self : Tuple ,lowercase__ : Dict=3 ,lowercase__ : int=4 ,lowercase__ : Optional[int]=4 ,lowercase__ : Optional[Any]=None ,lowercase__ : List[str]=None ,lowercase__ : Tuple="gelu" ,lowercase__ : Dict=0.0_2 ,lowercase__ : List[Any]=1e-1_2 ,lowercase__ : Optional[int]=0.0 ,lowercase__ : int=2_2_4 ,lowercase__ : int=None ,lowercase__ : Tuple=None ,**lowercase__ : List[str] ,):
        """Store architecture hyper-parameters, defaulting stage sizes and
        depths when not given, and derive stage names / backbone outputs."""
        super().__init__(**lowercase__ )
        __lowercase = num_channels
        __lowercase = patch_size
        __lowercase = num_stages
        # Default ConvNeXt-T sizes/depths when none are supplied.
        __lowercase = [9_6, 1_9_2, 3_8_4, 7_6_8] if hidden_sizes is None else hidden_sizes
        __lowercase = [3, 3, 9, 3] if depths is None else depths
        __lowercase = hidden_act
        __lowercase = initializer_range
        __lowercase = layer_norm_eps
        __lowercase = drop_path_rate
        __lowercase = image_size
        __lowercase = ['''stem'''] + [F"stage{idx}" for idx in range(1 ,len(self.depths ) + 1 )]
        __lowercase , __lowercase = get_aligned_output_features_output_indices(
            out_features=lowercase__ ,out_indices=lowercase__ ,stage_names=self.stage_names )
| 104 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazily-populated import structure: submodule name -> exported public names.
# Bug fixes: the original bound the dict and both per-backend lists to the
# same throwaway name (clobbering the dict) and then passed an undefined
# ``_import_structure`` to ``_LazyModule``.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 355 |
from __future__ import annotations
def __lowerCamelCase ( n , k ):
    """Return all k-combinations of ``1..n`` in lexicographic order.

    Bug fixes: the original repeated one parameter name (a SyntaxError) and
    called an undefined global ``create_all_state``; the backtracking step is
    now a self-contained nested helper.
    """

    def _build(start, level, current, total):
        # `level` counts how many more values are needed for this combination.
        if level == 0:
            total.append(current[:])
            return
        for value in range(start, n - level + 2):
            current.append(value)
            _build(value + 1, level - 1, current, total)
            current.pop()
    result = []
    _build(1, k, [], result)
    return result
def __lowerCamelCase ( increment , total_number , level , current_list , total_list ):
    """Backtracking step: extend *current_list* until *level* reaches 0, then
    snapshot it into *total_list*.

    Bug fixes: the original repeated one parameter name five times (a
    SyntaxError), read unbound names and recursed via an undefined global
    ``create_all_state``.
    """
    if level == 0:
        # Copy, because current_list keeps mutating during backtracking.
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        __lowerCamelCase(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()
def __lowerCamelCase ( UpperCamelCase__ ):
    """Print each combination in *UpperCamelCase__* on its own line.

    Bug fix: the original iterated an undefined global ``total_list`` and
    printed the whole argument on every iteration.
    """
    for combination in UpperCamelCase__:
        print(*combination)
if __name__ == "__main__":
    # Demo: all 2-combinations of {1..4}.
    _UpperCAmelCase : str = 4
    _UpperCAmelCase : Tuple = 2
    # NOTE(review): `generate_all_combinations`, `print_all_state`, `n`, `k`
    # and `total_list` are all undefined here — every helper above is named
    # `__lowerCamelCase` and the constants above were bound to `_UpperCAmelCase`
    # — so running this script raises NameError. Confirm intended names.
    _UpperCAmelCase : Optional[int] = generate_all_combinations(n, k)
    print_all_state(total_list)
| 200 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
# Valid torch.dynamo backend names, indexed by menu option number.
# Bug fix: the converter below reads `DYNAMO_BACKENDS`, but the list was only
# bound to the obfuscated name `a__` — a NameError at conversion time.
DYNAMO_BACKENDS = [
    'EAGER',
    'AOT_EAGER',
    'INDUCTOR',
    'NVFUSER',
    'AOT_NVFUSER',
    'AOT_CUDAGRAPHS',
    'OFI',
    'FX2TRT',
    'ONNXRT',
    'IPEX',
]
# Backward-compatible alias for the original binding.
a__ = DYNAMO_BACKENDS
def _UpperCamelCase ( input_text , convert_value=None , default=None , error_message=None ):
    """Prompt until the answer converts cleanly, then return the value.

    Bug fixes: the original repeated the parameter name ``__A`` four times
    (a SyntaxError) and the body read names that were never bound.

    An empty answer returns *default* when one is given; otherwise the
    (optionally converted) raw answer is returned.
    """
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            # Broad on purpose: any conversion failure re-prompts the user.
            if error_message is not None:
                print(error_message)
def _UpperCamelCase ( input_text , options=None , convert_value=None , default=0 ):
    """Show a :class:`BulletMenu` of *options* and return the (optionally
    converted) selection.

    Bug fixes: the original repeated the parameter name ``__A`` (a
    SyntaxError); the mutable ``[]`` default is replaced by a ``None``
    sentinel.
    """
    menu = BulletMenu(input_text, [] if options is None else options)
    result = menu.run(default_choice=default)
    return convert_value(result) if convert_value is not None else result
def _UpperCamelCase ( __A ):
    """Map a menu index (0 or 1) to a :class:`ComputeEnvironment` member.

    Bug fixes: the converted int was bound to a throwaway name while the body
    read an unbound ``value``; the bogus ``-> Dict`` annotation (``Dict`` is
    not imported) is dropped.
    """
    value = int(__A)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])
def _UpperCamelCase ( __A ):
    """Map a menu index to a :class:`DistributedType` member.

    Bug fix: the converted int was bound to a throwaway name while the body
    read an unbound ``value``.
    """
    value = int(__A)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])
def _UpperCamelCase ( __A ):
    """Map a menu index to the string value of a :class:`DynamoBackend` member.

    Bug fix: the converted int was bound to a throwaway name while the body
    read an unbound ``value``.
    NOTE(review): ``DYNAMO_BACKENDS`` must be the backend list defined at the
    top of this module (originally bound only as ``a__``) — confirm.
    """
    value = int(__A)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value
def _UpperCamelCase ( __A ):
    """Map a menu index to a :class:`PrecisionType` member.

    Bug fix: the converted int was bound to a throwaway name while the body
    read an unbound ``value``.
    """
    value = int(__A)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])
def _UpperCamelCase ( __A ):
    """Map a menu index to a :class:`SageMakerDistributedType` member.

    Bug fix: the converted int was bound to a throwaway name while the body
    read an unbound ``value``.
    """
    value = int(__A)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])
def _UpperCamelCase ( __A ):
    """Map a yes/no answer (case-insensitive) to a bool.

    Raises KeyError for anything other than "yes"/"no".
    Bug fix: the body read an unbound name ``value`` instead of the parameter.
    """
    return {"yes": True, "no": False}[__A.lower()]
class lowercase_ ( argparse.RawDescriptionHelpFormatter ):
    """Help formatter that hides the generic ``<command> [<args>]`` stub in
    usage lines.

    Bug fixes: the original repeated the parameter name ``a`` four times (a
    SyntaxError) and read an unbound ``usage`` local.
    NOTE(review): the method is named ``__a`` here, so it does not actually
    override :meth:`HelpFormatter._format_usage` — confirm intent.
    """

    def __a(self, usage, actions, groups, prefix):
        # Let argparse build the usage string, then strip the placeholder.
        formatted = super()._format_usage(usage, actions, groups, prefix)
        return formatted.replace("<command> [<args>] ", "")
| 80 | import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE_ :
    """Builds a tiny random LiLT config plus matching inputs for the tests below.

    Bug fixes: the original bound every constructor argument — and every
    method local — to a single throwaway name, so nothing reached ``self``
    and later references raised NameError/AttributeError. Method names are
    restored to the ones the test class below actually calls
    (``prepare_config_and_inputs``, ``create_and_check_model``, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.0_2,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        """Random ids/bbox/masks/labels shaped by the tester settings."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: coordinates must satisfy x1 <= x2, y1 <= y2.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        # The model must accept progressively fewer optional inputs.
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """bbox""": bbox,
            """token_type_ids""": token_type_ids,
            """attention_mask""": input_mask,
        }
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common-suite test harness for LiLT.

    Bug fixes: the original listed one undefined base name three times, gave
    every method the same (duplicated) parameter name — a SyntaxError — and
    named every method ``lowerCamelCase_`` so unittest could not discover
    them; lifecycle/test names are restored (``setUp`` / ``test_*``).
    """

    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": LiltModel,
            """question-answering""": LiltForQuestionAnswering,
            """text-classification""": LiltForSequenceClassification,
            """token-classification""": LiltForTokenClassification,
            """zero-shot""": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): upstream stores these two flags as `fx_compatible` and
    # `test_pruning` — confirm against the mixin's expected attribute names.
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # All pipeline tests are skipped for this architecture.
        return True

    def setUp(self):
        # NOTE(review): `LiltModelTester` is not defined in this module (the
        # tester class above is named SCREAMING_SNAKE_CASE_) — confirm binding.
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # Re-run the shape checks for every position-embedding flavour.
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Slow integration check against the released lilt-roberta-en-base weights.

    Bug fixes: the original bound every local to one throwaway name and moved
    the model to an unbound device name; locals and ``torch_device`` are
    restored and the test method is discoverable again.
    """

    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]],
            device=torch_device,
        )

        # Bug fix: assertTrue(shape, expected) treated the expectation as a
        # message and never compared anything.
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1E-3))
| 343 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
# Minimum Flax version assumed by the staging tests below.
lowerCamelCase : str = """0.12"""  # assumed parallelism: 8
@require_flax
@is_staging_test
class _UpperCamelCase ( unittest.TestCase ):
    """Push-to-hub round-trip tests for Flax models (run against the staging Hub).

    Bug fixes: the original bound the auth token and every local to throwaway
    names (so ``self._token`` was never set and unbound names were passed to
    the Hub calls) and named all methods ``__lowerCamelCase``; the unittest
    lifecycle hooks and ``test_*`` names are restored.
    """

    @classmethod
    def setUpClass(cls):
        # Authenticate once for the whole class; self._token is reused below.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup: the repos may not exist if a test failed early.
        try:
            delete_repo(token=cls._token, repo_id='test-model-flax')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-model-flax-org')
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7)
        model = FlaxBertModel(config)
        model.push_to_hub('test-model-flax', use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"""{key} not identical""")

        # Reset repo
        delete_repo(token=self._token, repo_id='test-model-flax')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id='test-model-flax', push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"""{key} not identical""")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7)
        model = FlaxBertModel(config)
        model.push_to_hub('valid_org/test-model-flax-org', use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org')

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"""{key} not identical""")

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-model-flax-org')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id='valid_org/test-model-flax-org', push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org')

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"""{key} not identical""")
def _A ( model_a , model_b ):
    """Return True when every parameter tensor of *model_a* matches *model_b*
    (per-tensor summed absolute difference below 1e-4).

    Bug fixes: the original repeated one parameter name (a SyntaxError) and
    flattened the same model twice, so it always compared a model to itself.
    """
    models_are_equal = True
    flat_params_a = flatten_dict(model_a.params)
    flat_params_b = flatten_dict(model_b.params)
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class _UpperCamelCase ( unittest.TestCase ):
    """Loading-from-subfolder behaviour of Flax models (local dir and Hub).

    Bug fixes: the original bound every local to one throwaway name, asserted
    raises of an unbound exception name, and named every method
    ``__lowerCamelCase``; distinct locals and discoverable ``test_*`` names
    are restored. The equality helper in this module is bound to ``_A``.
    """

    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only')
        model = FlaxBertModel(config)

        subfolder = 'bert'
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            # Loading from the root must fail; the weights live in the subfolder.
            with self.assertRaises(OSError):
                FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(_A(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only')
        model = FlaxBertModel(config)

        subfolder = 'bert'
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size='10KB')

            with self.assertRaises(OSError):
                FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(_A(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = 'bert'
        model_id = 'hf-internal-testing/tiny-random-bert-subfolder'

        with self.assertRaises(OSError):
            FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = 'bert'
        model_id = 'hf-internal-testing/tiny-random-bert-sharded-subfolder'

        with self.assertRaises(OSError):
            FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
| 366 |
'''simple docstring'''
from __future__ import annotations
import requests
def _A ( _lowerCAmelCase ):
    """Fetch one Hacker News item (by id) from the Firebase API as a dict."""
    item_url = f"""https://hacker-news.firebaseio.com/v0/item/{_lowerCAmelCase}.json?print=pretty"""
    response = requests.get(item_url)
    return response.json()
def _A ( _lowerCAmelCase = 10 ):
    """Return dicts for the current top *_lowerCAmelCase* Hacker News stories.

    Bug fix: the original called ``get_hackernews_story``, which does not
    exist in this module (the fetcher above is also bound to ``_A`` and is
    shadowed by this def), so the per-item fetch is inlined here.
    """
    url = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
    story_ids = requests.get(url).json()[:_lowerCAmelCase]
    return [
        requests.get(f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty""").json()
        for story_id in story_ids
    ]
def _A ( _lowerCAmelCase = 10 ):
    """Render the top *_lowerCAmelCase* Hacker News stories as a Markdown list."""
    # NOTE(review): `hackernews_top_stories` is not defined in this module —
    # the fetchers above are also bound to `_A` — so this raises NameError.
    __lowercase =hackernews_top_stories(_lowerCAmelCase )
    # NOTE(review): `stories` is unbound (the result above went to
    # `__lowercase`) and `**_lowerCAmelCase` formats the int argument, not the
    # story dict — confirm intended names.
    return "\n".join('* [{title}]({url})'.format(**_lowerCAmelCase ) for story in stories )
if __name__ == "__main__":
    # NOTE(review): `hackernews_top_stories_as_markdown` is not defined here —
    # all three helpers above are named `_A` — so running this raises NameError.
    print(hackernews_top_stories_as_markdown())
| 48 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( ) -> str:
    """Project Euler 48: the last ten digits of 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    # Bug fix: the original stringified the function object itself
    # (`str(SCREAMING_SNAKE_CASE_)`), not the accumulated sum; the `-> int`
    # annotation is also corrected — a string slice is returned.
    return str(total)[-10:]
if __name__ == "__main__":
    # Bug fix: `solution` is undefined here — the solver above is named
    # SCREAMING_SNAKE_CASE_.
    print(SCREAMING_SNAKE_CASE_())
'''simple docstring'''
import math
import unittest
def __a(SCREAMING_SNAKE_CASE_ : int ):
    """Return True iff the given non-negative int is prime (6k±1 trial division).

    Bug fixes: the original called ``isinstance(number, number)`` — passing
    the value itself as the type — and read an unbound name ``number``.
    """
    number = SCREAMING_SNAKE_CASE_
    assert isinstance(number, int) and number >= 0, "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class lowerCAmelCase_ ( unittest.TestCase ):
    """Unit tests for the primality helper defined above."""

    # NOTE(review): `is_prime` is not defined in this module — the function
    # above is named `__a`, and a bare `__a` reference inside this class body
    # would be name-mangled to `_lowerCAmelCase___a` — so these tests raise
    # NameError as written. Confirm the intended function name.
    def _snake_case ( self ) -> str:
        # Small known primes must be accepted.
        self.assertTrue(is_prime(2 ) )
        self.assertTrue(is_prime(3 ) )
        self.assertTrue(is_prime(5 ) )
        self.assertTrue(is_prime(7 ) )
        self.assertTrue(is_prime(11 ) )
        self.assertTrue(is_prime(13 ) )
        self.assertTrue(is_prime(17 ) )
        self.assertTrue(is_prime(19 ) )
        self.assertTrue(is_prime(23 ) )
        self.assertTrue(is_prime(29 ) )

    # NOTE(review): both methods share the name `_snake_case`, so only this
    # second definition survives on the class; neither starts with `test_`,
    # so unittest discovery never runs them. Confirm intended method names.
    def _snake_case ( self ) -> List[Any]:
        # NOTE(review): `_lowerCAmelCase` is unbound here — presumably this
        # asserted AssertionError for a negative input; confirm.
        with self.assertRaises(_lowerCAmelCase ):
            is_prime(-19 )
        self.assertFalse(
            is_prime(0 ) , "Zero doesn't have any positive factors, primes must have exactly two." , )
        self.assertFalse(
            is_prime(1 ) , "One only has 1 positive factor, primes must have exactly two." , )
        # Composite numbers must be rejected.
        self.assertFalse(is_prime(2 * 2 ) )
        self.assertFalse(is_prime(2 * 3 ) )
        self.assertFalse(is_prime(3 * 3 ) )
        self.assertFalse(is_prime(3 * 5 ) )
        self.assertFalse(is_prime(3 * 5 * 7 ) )


if __name__ == "__main__":
    unittest.main()
| 158 | 0 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a__ ( unittest.TestCase ):
    """Slow end-to-end tests for the Flax Stable Diffusion ControlNet pipeline.

    Bug fixes: the original bound every local (and both halves of each tuple
    unpack) to one throwaway name, passed unbound names for ``from_pt``/
    ``jit``, referenced the nonexistent dtype ``jnp.bfloataa``, and named the
    test methods undiscoverably; all are restored.
    """

    def tearDown(self):
        # Encourage release of device memory between the slow tests.
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            '''lllyasviel/sd-controlnet-canny''', from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params['''controlnet'''] = controlnet_params

        prompts = '''bird'''
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''')
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 2_53:2_56, 2_53:2_56, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.16_79_69, 0.11_66_99, 0.08_15_43, 0.15_42_97, 0.13_28_12, 0.10_88_87, 0.16_99_22, 0.16_99_22, 0.20_50_78])
        print(f"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            '''lllyasviel/sd-controlnet-openpose''', from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params['''controlnet'''] = controlnet_params

        prompts = '''Chef in the kitchen'''
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''')
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 2_53:2_56, 2_53:2_56, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.27_14_84, 0.26_17_19, 0.27_53_91, 0.27_73_44, 0.27_92_97, 0.29_10_16, 0.29_49_22, 0.30_27_34, 0.30_27_34]])
        print(f"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 365 | '''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCamelCase : Optional[Any] =LongformerTokenizer
lowerCamelCase : Optional[Any] =True
lowerCamelCase : List[str] =LongformerTokenizerFast
lowerCamelCase : Union[str, Any] =True
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCamelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__lowerCamelCase = dict(zip(a , range(len(a ) ) ) )
__lowerCamelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__lowerCamelCase = {'''unk_token''': '''<unk>'''}
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(a ) )
def SCREAMING_SNAKE_CASE__ ( self : int , **a : int ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **a )
def SCREAMING_SNAKE_CASE__ ( self : str , **a : Dict ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **a )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : int ):
"""simple docstring"""
__lowerCamelCase = '''lower newer'''
__lowerCamelCase = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowerCamelCase = '''lower newer'''
__lowerCamelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__lowerCamelCase = tokenizer.tokenize(a ) # , add_prefix_space=True)
self.assertListEqual(a , a )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=a ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=a ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
    @slow
    def SCREAMING_SNAKE_CASE__ ( self : Any ):
        """Check ``encode(..., add_special_tokens=...)`` against ``build_inputs_with_special_tokens``."""
        # NOTE(review): mangled — every local is assigned to ``__lowerCamelCase``
        # while later lines reference the real names, and ``add_special_tokens=a``
        # / ``add_prefix_space=a`` hide what were True/False literals. TODO restore
        # from the upstream test file before enabling.
        __lowerCamelCase = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
        __lowerCamelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=a )
        __lowerCamelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=a )
        __lowerCamelCase = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=a , add_prefix_space=a )
        __lowerCamelCase = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=a , add_prefix_space=a )
        __lowerCamelCase = tokenizer.build_inputs_with_special_tokens(a )
        __lowerCamelCase = tokenizer.build_inputs_with_special_tokens(a , a )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
        """Exercise prefix-space handling around special tokens (notably ``<mask>``)."""
        # NOTE(review): machine-mangled — locals are assigned to ``__lowerCamelCase``
        # but referenced by their real names (``encoded``, ``mask_loc``), and the
        # boolean/argument literals were all collapsed into the undefined ``a``.
        # TODO restore from the upstream tokenizer test before running.
        __lowerCamelCase = self.get_tokenizer()
        __lowerCamelCase = '''Encode this sequence.'''
        __lowerCamelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
        # Testing encoder arguments
        __lowerCamelCase = tokenizer.encode(a , add_special_tokens=a , add_prefix_space=a )
        __lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(a , a )
        __lowerCamelCase = tokenizer.encode(a , add_special_tokens=a , add_prefix_space=a )
        __lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(a , a )
        tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
        __lowerCamelCase = tokenizer.encode(a , add_special_tokens=a )
        __lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(a , a )
        # Testing spaces after special tokens
        __lowerCamelCase = '''<mask>'''
        tokenizer.add_special_tokens(
            {'''mask_token''': AddedToken(a , lstrip=a , rstrip=a )} ) # mask token has a left space
        __lowerCamelCase = tokenizer.convert_tokens_to_ids(a )
        __lowerCamelCase = '''Encode <mask> sequence'''
        __lowerCamelCase = '''Encode <mask>sequence'''
        __lowerCamelCase = tokenizer.encode(a )
        __lowerCamelCase = encoded.index(a )
        __lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(a , a )
        __lowerCamelCase = tokenizer.encode(a )
        __lowerCamelCase = encoded.index(a )
        __lowerCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(a , a )
    def SCREAMING_SNAKE_CASE__ ( self : str ):
        """Intentional no-op: overrides (and thereby disables) a test from the common mixin."""
        # NOTE(review): the original name of the disabled test was lost to mangling.
        pass
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
        """Compare slow vs fast tokenizer output (token_type_ids, attention_mask, ids)."""
        # NOTE(review): mangled — ``tokenizer_r``/``tokenizer_p``/``tokens_r``/
        # ``tokens_p`` are referenced but assigned to ``__lowerCamelCase``, and the
        # literal arguments are hidden behind the undefined ``a``. TODO restore.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                __lowerCamelCase = self.rust_tokenizer_class.from_pretrained(a , **a )
                __lowerCamelCase = self.tokenizer_class.from_pretrained(a , **a )
                __lowerCamelCase = '''A, <mask> AllenNLP sentence.'''
                __lowerCamelCase = tokenizer_r.encode_plus(a , add_special_tokens=a , return_token_type_ids=a )
                __lowerCamelCase = tokenizer_p.encode_plus(a , add_special_tokens=a , return_token_type_ids=a )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
                __lowerCamelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                __lowerCamelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(
                    a , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    a , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
    def SCREAMING_SNAKE_CASE__ ( self : str ):
        """Serialized pre-tokenizer/post-processor state must reflect the ctor arguments."""
        # NOTE(review): mangled — the keyword values were originally the loop
        # variables / True but are now the undefined ``a``; ``tokenizer_r`` and the
        # two state dicts are referenced without being bound. TODO restore.
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            __lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=a , add_prefix_space=a , trim_offsets=a )
            __lowerCamelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            __lowerCamelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , a )
            self.assertEqual(post_processor_state['''add_prefix_space'''] , a )
            self.assertEqual(post_processor_state['''trim_offsets'''] , a )
    def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
        """Verify offset mappings for every add_prefix_space/trim_offsets combination."""
        # NOTE(review): mangled — the four boolean arguments of each
        # ``from_pretrained`` call (originally distinct True/False combinations)
        # and every local were collapsed into the undefined names ``a`` /
        # ``__lowerCamelCase``. The expected offset tuples below still encode the
        # intended combinations; restore the arguments from the upstream test.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                __lowerCamelCase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
                __lowerCamelCase = f"""{text_of_1_token} {text_of_1_token}"""
                __lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , add_prefix_space=a , trim_offsets=a )
                __lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , )
                __lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , add_prefix_space=a , trim_offsets=a )
                __lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , )
                __lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , add_prefix_space=a , trim_offsets=a )
                __lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(a ), len(a ) + 1 + len(a )) , )
                __lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , add_prefix_space=a , trim_offsets=a )
                __lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(a ), len(a ) + 1 + len(a )) , )
                __lowerCamelCase = f""" {text}"""
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                __lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , add_prefix_space=a , trim_offsets=a )
                __lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(a ) + 1, 1 + len(a ) + 1 + len(a )) , )
                __lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , add_prefix_space=a , trim_offsets=a )
                __lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(a ), 1 + len(a ) + 1 + len(a )) , )
                __lowerCamelCase = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , add_prefix_space=a , trim_offsets=a )
                __lowerCamelCase = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(a ), 1 + len(a ) + 1 + len(a )) , )
| 237 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
A : int = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Build (old_name, new_name) pairs mapping DINO/timm ViT weight names to
    their HuggingFace ``ViTModel``/``ViTForImageClassification`` equivalents.

    Args:
        config: ViT config exposing ``num_hidden_layers``.
        base_model: when True, target a bare ``ViTModel`` — the ``vit.`` prefix
            is stripped and the classifier head is omitted.

    Returns:
        list[tuple[str, str]]: rename pairs consumed by ``rename_key``.
    """
    # NOTE(review): the mangled original assigned the empty list to a throwaway
    # name and appended to an undefined ``rename_keys``; names restored from the
    # call site (``create_rename_keys`` at the bottom of this script).
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused timm ``qkv`` projection into separate HF query/key/value entries.

    Mutates ``state_dict`` in place: pops ``blocks.{i}.attn.qkv.{weight,bias}``
    and inserts the three per-projection tensors under the HF naming scheme.

    Args:
        state_dict: checkpoint mapping to rewrite in place.
        config: ViT config exposing ``num_hidden_layers`` and ``hidden_size``.
        base_model: bare ``ViTModel`` checkpoints carry no ``vit.`` prefix.
    """
    # NOTE(review): the target dict keys were lost to mangling; they are
    # reconstructed from the upstream DINO conversion script — confirm before use.
    for i in range(config.num_hidden_layers):
        prefix = "" if base_model else "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the timm classification head weights from ``state_dict`` in place.

    A no-op when the keys are absent (head-less checkpoints).
    """
    # NOTE(review): the mangled original popped with the state_dict itself as both
    # arguments; restored to pop each ignore key with a ``None`` default.
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place (raises KeyError if ``old`` is absent)."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO cats image used to sanity-check converted models."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # NOTE(review): the mangled original passed an undefined name for the URL and
    # for ``stream=``; ``stream=True`` restored per the sibling conversion scripts.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """Convert a facebookresearch/dino torch.hub checkpoint to the HF ViT format.

    Loads the original model, rewrites its state dict via ``create_rename_keys``/
    ``rename_key``/``read_in_q_k_v``, verifies the converted model against the
    original on a test image, then saves model + image processor.

    Args:
        model_name: DINO hub entry, e.g. "dino_vitb16" or "dino_vits8".
        pytorch_dump_folder_path: output directory for the converted files.
        base_model: convert to a bare ``ViTModel`` (True) or to
            ``ViTForImageClassification`` with ImageNet labels (False).
    """
    # NOTE(review): every local in the mangled original was collapsed into one
    # name; identifiers are restored from their later use sites and the upstream
    # transformers conversion script — review before trusting edge cases.
    config = ViTConfig()
    # patch_size: "…8" model names use 8x8 patches instead of the default 16x16
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1_0_0_0
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture (the "small" variants)
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 3_8_4
        config.intermediate_size = 1_5_3_6
        config.num_hidden_layers = 1_2
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse the conversion options and run the converter.
    # NOTE(review): the mangled original bound the parser and the parsed args to a
    # throwaway name while using ``parser``/``args`` below — restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dino_vitb16",
        type=str,
        help="Name of the model trained with DINO you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--base_model",
        action="store_true",
        help="Whether to only convert the base model (no projection head weights).",
    )
    parser.set_defaults(base_model=True)
    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 118 | import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
A : Tuple = get_tests_dir("fixtures")
class lowerCamelCase (unittest.TestCase ):
    """Resilience tests for ``WavaVecaFeatureExtractor.from_pretrained`` (cached model
    under server errors; deprecated direct-URL loading)."""
    # NOTE(review): locals are machine-mangled — every assignment targets
    # ``SCREAMING_SNAKE_CASE_`` and the mock response is later referenced through
    # the undefined ``__magic_name__``. Restore before running.
    def __A ( self : Union[str, Any] ) -> Union[str, Any]:
        # A mock response for an HTTP head request to emulate server down
        SCREAMING_SNAKE_CASE_ = mock.Mock()
        SCREAMING_SNAKE_CASE_ = 500
        SCREAMING_SNAKE_CASE_ = {}
        SCREAMING_SNAKE_CASE_ = HTTPError
        SCREAMING_SNAKE_CASE_ = {}
        # Download this model to make sure it's in the cache.
        SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request" , return_value=__magic_name__ ) as mock_head:
            SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
            # This check we did call the fake head request
            mock_head.assert_called()
    def __A ( self : Optional[int] ) -> Tuple:
        # This test is for deprecated behavior and can be removed in v5
        SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class lowerCamelCase (unittest.TestCase ):
    """Staging-hub round-trip tests: push a feature extractor (user repo, org repo,
    dynamic custom class) and reload it, checking attribute equality."""
    # NOTE(review): locals are machine-mangled (assignments to
    # ``SCREAMING_SNAKE_CASE_``, reads through the undefined ``__magic_name__``),
    # and the asserts compare via those placeholders. Restore before running.
    @classmethod
    def __A ( cls : Any ) -> Union[str, Any]:
        # Authenticate against the staging endpoint for the whole class.
        SCREAMING_SNAKE_CASE_ = TOKEN
        HfFolder.save_token(__magic_name__ )
    @classmethod
    def __A ( cls : int ) -> Optional[Any]:
        # Best-effort cleanup of the repos the tests may have created.
        try:
            delete_repo(token=cls._token , repo_id="test-feature-extractor" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
        except HTTPError:
            pass
    def __A ( self : str ) -> List[Any]:
        # Push to a user namespace via push_to_hub, then via save_pretrained(push_to_hub=True).
        SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained(__magic_name__ )
        feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
        SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="test-feature-extractor" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                __magic_name__ , repo_id="test-feature-extractor" , push_to_hub=__magic_name__ , use_auth_token=self._token )
        SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
    def __A ( self : Dict ) -> str:
        # Same round-trip but against an organization namespace.
        SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained(__magic_name__ )
        feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
        SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                __magic_name__ , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=__magic_name__ , use_auth_token=self._token )
        SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
    def __A ( self : List[Any] ) -> Dict:
        # Dynamic (trust_remote_code) custom feature extractor round-trip.
        CustomFeatureExtractor.register_for_auto_class()
        SCREAMING_SNAKE_CASE_ = CustomFeatureExtractor.from_pretrained(__magic_name__ )
        feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
        SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(
            F'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=__magic_name__ )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
| 118 | 1 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    """A simulated hanging connection must raise the dedicated timeout errors."""
    # NOTE(review): the mangled original raised an undefined name here;
    # RequestWouldHangIndefinitelyError (imported above) is the matching symbol.
    # Renamed with a ``test_`` prefix so pytest collects the function.
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
    """A simulated failing connection must surface as requests' ConnectionError."""
    # Renamed from a mangled identifier: pytest only collects test_* functions.
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")
def UpperCAmelCase__ ( ):
    # With HF_DATASETS_OFFLINE=1 simulated, a hub HTTP HEAD must be refused.
    # NOTE(review): ``lowerCamelCase`` is an unresolved machine-mangled name —
    # upstream this is datasets' OfflineModeIsEnabled error (not imported in this
    # file); confirm and restore the import plus the name.
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(lowerCamelCase ):
            http_head("https://huggingface.co" )
| 357 |
from collections.abc import Iterable
from typing import Generic, TypeVar
_UpperCAmelCase : Any = TypeVar("_T")
class __lowerCAmelCase ( Generic[_T]):
def __init__( self: Union[str, Any] , _lowerCAmelCase: Iterable[_T] | None = None ):
lowercase :list[_T] = list(iterable or [] )
lowercase :list[_T] = []
def __len__( self: Dict ):
return len(self._stacka ) + len(self._stacka )
def __repr__( self: List[Any] ):
return F"Queue({tuple(self._stacka[::-1] + self._stacka )})"
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _lowerCAmelCase: _T ):
self._stacka.append(_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Any ):
lowercase :int = self._stacka.pop
lowercase :List[Any] = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError("Queue is empty" )
return self._stacka.pop()
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    from doctest import testmod
    testmod()
| 158 | 0 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
lowercase__ :Dict = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
if got_ver is None or want_ver is None:
raise ValueError(
f'Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'
f' reinstalling {pkg}.' )
if not ops[op](version.parse(lowerCAmelCase__ ) , version.parse(lowerCAmelCase__ ) ):
raise ImportError(
f'{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}' )
def require_version(requirement, hint=None):
    """Check that the installed distribution satisfies a pip-style requirement.

    Args:
        requirement: e.g. ``"tokenizers==0.9.4"``, ``"numpy>=1.17,<1.25"`` or a
            bare package name; ``"python"`` is checked against the interpreter.
        hint: extra text appended to error messages (e.g. how to install).

    Raises:
        ValueError: malformed requirement string or unknown operator.
        importlib.metadata.PackageNotFoundError: distribution not installed.
        ImportError: installed version does not satisfy the requirement.
    """
    # NOTE(review): identifiers restored from their f-string use sites — the
    # mangled dump collapsed both parameters and every local into shared names.
    hint = f"\n{hint}" if hint is not None else ""
    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case: compare against the running interpreter version
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    """``require_version`` wrapper that appends the transformers-core install hint."""
    # NOTE(review): the mangled dump gave all three helpers in this module the
    # same shadowing name; this one restored per transformers' utils/versions.py.
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
| 101 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Pipeline tests for ``IFInpaintingPipeline`` (DeepFloyd IF inpainting).

    NOTE(review): machine-mangled — the two mixin base classes are the undefined
    ``SCREAMING_SNAKE_CASE__`` (presumably PipelineTesterMixin and
    IFPipelineTesterMixin, both imported above), every method is named ``A__`` so
    later defs shadow earlier ones, and locals like ``generator``/``image``/
    ``mask_image``/``inputs`` are referenced without being bound. Restore before running.
    """
    lowercase_ : List[Any] =IFInpaintingPipeline
    lowercase_ : Optional[int] =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
    lowercase_ : Any =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    lowercase_ : str =PipelineTesterMixin.required_optional_params - {'''latents'''}
    def A__ ( self):
        return self._get_dummy_components()
    def A__ ( self ,A__ ,A__=0):
        # Build deterministic dummy inputs (generator, 32x32 image + mask) for the device.
        if str(A__).startswith('''mps'''):
            lowercase = torch.manual_seed(A__)
        else:
            lowercase = torch.Generator(device=A__).manual_seed(A__)
        lowercase = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(A__)).to(A__)
        lowercase = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(A__)).to(A__)
        lowercase = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
    def A__ ( self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
    def A__ ( self):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != '''cuda''' ,reason='''float16 requires CUDA''')
    def A__ ( self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1E-1)
    def A__ ( self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
    def A__ ( self):
        self._test_save_load_local()
    def A__ ( self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 ,)
| 101 | 1 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case( _UpperCAmelCase , unittest.TestCase ):
    '''Fast-tokenizer tests for BLOOM (padding handling, XNLI round-trip, and the
    ALiBi no-max-length override).

    NOTE(review): machine-mangled — every method is named ``__snake_case`` so
    later defs shadow earlier ones, locals are bound to ``lowerCAmelCase`` while
    referenced by their real names (``tokenizer``, ``tokenizer_r``, ...), and the
    final line carries dataset-dump residue (``| 371 |``). Restore before running.
    '''
    UpperCAmelCase : Dict = None
    UpperCAmelCase : str = BloomTokenizerFast
    UpperCAmelCase : Union[str, Any] = BloomTokenizerFast
    UpperCAmelCase : Optional[Any] = True
    UpperCAmelCase : Optional[Any] = False
    UpperCAmelCase : int = "tokenizer_file"
    UpperCAmelCase : Optional[Any] = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def __snake_case ( self ) -> Union[str, Any]:
        super().setUp()
        lowerCAmelCase = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" )
        tokenizer.save_pretrained(self.tmpdirname )
    def __snake_case ( self , **A_ ) -> Union[str, Any]:
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )
    def __snake_case ( self ) -> str:
        # Known sentences must round-trip through encode/decode with fixed ids.
        lowerCAmelCase = self.get_rust_tokenizer()
        lowerCAmelCase = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
        lowerCAmelCase = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
        lowerCAmelCase = tokenizer.batch_encode_plus(lowercase_ )["""input_ids"""]
        self.assertListEqual(lowercase_ , lowercase_ )
        lowerCAmelCase = tokenizer.batch_decode(lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )
    def __snake_case ( self , A_=6 ) -> Dict:
        # Padding must work with and without a pad token configured.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                lowerCAmelCase = """This is a simple input"""
                lowerCAmelCase = ["""This is a simple input 1""", """This is a simple input 2"""]
                lowerCAmelCase = ("""This is a simple input""", """This is a pair""")
                lowerCAmelCase = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(lowercase_ , max_length=lowercase_ )
                    tokenizer_r.encode_plus(lowercase_ , max_length=lowercase_ )
                    tokenizer_r.batch_encode_plus(lowercase_ , max_length=lowercase_ )
                    tokenizer_r.encode(lowercase_ , max_length=lowercase_ )
                    tokenizer_r.batch_encode_plus(lowercase_ , max_length=lowercase_ )
                except ValueError:
                    self.fail("""Bloom Tokenizer should be able to deal with padding""" )
                lowerCAmelCase = None # Hotfixing padding = None
                self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="""max_length""" )
                # Simple input
                self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="""max_length""" )
                # Simple input
                self.assertRaises(
                    lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="""max_length""" , )
                # Pair input
                self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="""max_length""" )
                # Pair input
                self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="""max_length""" )
                # Pair input
                self.assertRaises(
                    lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="""max_length""" , )
    def __snake_case ( self ) -> Dict:
        # Encode/decode round-trip over real XNLI data (streamed, one sample).
        lowerCAmelCase = self.get_rust_tokenizer()
        lowerCAmelCase = load_dataset("""xnli""" , """all_languages""" , split="""test""" , streaming=lowercase_ )
        lowerCAmelCase = next(iter(lowercase_ ) )["""premise"""] # pick up one data
        lowerCAmelCase = list(sample_data.values() )
        lowerCAmelCase = list(map(tokenizer.encode , lowercase_ ) )
        lowerCAmelCase = [tokenizer.decode(lowercase_ , clean_up_tokenization_spaces=lowercase_ ) for x in output_tokens]
        self.assertListEqual(lowercase_ , lowercase_ )
    def __snake_case ( self ) -> Union[str, Any]:
        # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positoonal embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 ) | 371 |
"""Project Euler 36: sum all numbers below a limit that are palindromic in base 10 and base 2."""
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    """Return True if the decimal string form of `n` reads the same forwards and backwards."""
    s = str(n)
    return s == s[::-1]


def solution(limit: int = 1_000_000) -> int:
    """Return the sum of all numbers below `limit` palindromic in both base 10 and base 2.

    Leading zeros cannot occur in base 10, and `bin()` never emits them in base 2.
    """
    total = 0
    for i in range(1, limit):
        # bin(i) looks like "0b101"; strip the "0b" prefix before the palindrome check.
        if is_palindrome(i) and is_palindrome(bin(i)[2:]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(input().strip())))
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger (the obfuscated copy reused one name for both the logger and the
# map below, so the logger binding was silently overwritten).
logger = logging.get_logger(__name__)

# Checkpoint identifier -> hosted config.json URL.
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
    "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
    "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
    "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
    "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
    "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
    "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
    "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
    "bert-large-uncased-whole-word-masking": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking": (
        "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
    "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
    "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-cased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-uncased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
    ),
    "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
    # See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    """Configuration class for a BERT model.

    Parameter names were reconstructed from the attribute assignments in the body;
    the obfuscated copy gave every parameter the same name, which is a SyntaxError.
    `pad_token_id` is forwarded to the base class; everything else is stored as-is.
    """

    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    """ONNX export configuration for BERT (declares the dynamic input axes)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Node:
    """One slot of the ring: payload plus links to both neighbours."""

    def __init__(self) -> None:
        self.data = None
        self.next = None
        self.prev = None


class CircularQueueLinkedList:
    """Fixed-capacity FIFO queue backed by a circular doubly linked list.

    All nodes are pre-allocated; `front`/`rear` walk the ring. A slot is free
    when its `data` is None. Raises Exception("Empty Queue") on reads from an
    empty queue and Exception("Full Queue") when every slot is occupied.
    """

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front = None
        self.rear = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        """Pre-allocate `initial_capacity` empty nodes wired into a ring."""
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # Close the ring.
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        # Empty iff front and rear coincide on an unoccupied slot.
        return self.front == self.rear and self.front is not None and self.front.data is None

    def first(self):
        """Return the front value without removing it (raises on empty)."""
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data) -> None:
        """Store `data` in the next free slot (raises when the ring is full)."""
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self):
        """Remove and return the front value (raises on empty)."""
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            # Single occupied slot: clear it in place.
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
# NOTE(review): the obfuscated copy assigned all five constants to the same name, so only
# the last survived. Names below follow the upstream transformers hub-utils tests; confirm.
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def context_en():
    """Context manager that brackets its block with English greetings on stdout."""
    print("Welcome!")
    yield
    print("Bye!")
@contextlib.contextmanager
def context_fr():
    """Context manager that brackets its block with French greetings on stdout."""
    print("Bonjour!")
    yield
    print("Au revoir!")
class TestImportMechanisms(unittest.TestCase):
    """Sanity check that `transformers` exposes a module spec for dynamic import."""

    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec('transformers') is not None
class GenericUtilTests(unittest.TestCase):
    """Tests for `ContextManagers` and `find_labels` across the three frameworks.

    NOTE(review): the obfuscated copy replaced the model-class arguments of
    `find_labels` with an undefined name; the classes below were reconstructed
    from the expected label lists -- verify against the upstream test module.
    """

    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print('Transformers are awesome!')
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), 'Transformers are awesome!\n')

    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print('Transformers are awesome!')
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), 'Welcome!\nTransformers are awesome!\nBye!\n')

    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print('Transformers are awesome!')
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n')

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ['labels'])
        self.assertEqual(find_labels(BertForPreTraining), ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(BertForQuestionAnswering), ['start_positions', 'end_positions'])

        # User subclasses inherit the label names of their parent.
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ['labels'])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ['labels'])
        self.assertEqual(find_labels(TFBertForPreTraining), ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ['start_positions', 'end_positions'])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ['labels'])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import structure: submodule name -> public names it defines.
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is optional: the modeling objects simply stay unregistered.
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__UpperCamelCase = logging.get_logger(__name__)
class SchedulerType(Enum):
    """Names of the supported learning-rate schedules (values are the CLI/string ids)."""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer, last_epoch: int = -1):
    """Schedule with a constant learning rate (multiplier is always 1)."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant schedule preceded by a linear warmup from 0 to 1 over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise-constant schedule parsed from a rule string.

    `step_rules` looks like "2:1.0,10:0.5,0.1": multiplier 1.0 before step 2,
    0.5 before step 10, then 0.1 for every later step (the trailing bare value).
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        step_str, value_str = rule_str.split(":")
        steps = int(step_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            # Past every threshold: fall back to the trailing multiplier.
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup to 1, then linear decay to 0 at `num_training_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles: float = 0.5, last_epoch: int = -1):
    """Linear warmup, then cosine decay; `num_cycles=0.5` means one half-wave down to 0."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles: int = 1, last_epoch: int = -1):
    """Linear warmup, then cosine decay with `num_cycles` hard restarts back to the peak."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # The modulo restarts the cosine at the top of each cycle.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    """Linear warmup, then polynomial decay from the optimizer's initial lr down to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Dispatch table used by `get_scheduler` below.
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name,
    optimizer,
    step_rules=None,
    num_warmup_steps=None,
    num_training_steps=None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified factory: build the scheduler named by `name` (str or SchedulerType).

    Raises ValueError when a schedule's required `num_warmup_steps` /
    `num_training_steps` argument is missing.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Return the last `num_runs` workflow runs of the scheduled (daily) CI on `main`."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """Return the id of the most recent *completed* daily CI workflow run, or None."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run into `output_dir`."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NOTE: the `worflow_run_id` keyword (sic) matches the signature of the imported helper.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the last daily CI artifacts and return their text contents.

    Returns a dict: artifact name -> {member filename -> decoded file content}.
    """
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    # Without sentencepiece the slow tokenizer cannot be built.
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizerFast(PreTrainedTokenizerFast):
    """Fast mBART tokenizer backed by HuggingFace's *tokenizers* library.

    Sequences are wrapped with language codes: source text becomes
    ``X [eos, src_lang_code]`` and target text ``X [eos, tgt_lang_code]`` (no BOS).

    NOTE(review): reconstructed from an obfuscated copy whose parameter and
    method names were destroyed (duplicate parameter names do not even parse);
    names follow the upstream mBART fast tokenizer -- verify against it.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # A slow tokenizer can only be rebuilt when the sentencepiece model file is known.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        """Language code whose special tokens are currently applied to inputs."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add the mBART prefix/suffix (eos + language code) around the token ids."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """mBART does not use token type ids; return a zero mask of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by translation pipelines: encode inputs and record the target language id."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts, src_lang: str = "en_XX", tgt_texts=None, tgt_lang: str = "ro_RO", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset prefix/suffix to ``[]`` / ``[eos, src_lang_code]`` and rebuild the post-processor."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset prefix/suffix to ``[]`` / ``[eos, tgt_lang_code]`` and rebuild the post-processor."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into `save_directory` so a slow tokenizer can load it."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return sigmoid(value); with deriv=True, return the derivative given a sigmoid *output*."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Fixed input value fed to the single neuron on every propagation.
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight so the neuron's output approaches `expected` / 100.

    Returns the final output scaled back to [0, 100]. With enough propagations
    (e.g. 450_000) the result lands within +/-1 of `expected`.
    """
    # Random start weight: an odd integer in [1, 199].
    weight = float(2 * (random.randint(1, 100)) - 1)

    # Computed up front so a zero-propagation call still has an output to return.
    layer_1 = sigmoid_function(INITIAL_VALUE * weight)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")  # element type stored in the cache
class LRUCache(Generic[T]):
    """LRU page-replacement cache: a deque ordered most-recent-first plus a membership set."""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            # Capacity 0 means "unbounded".
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Touch key `x`: move it to the front, evicting the least-recent key when full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                # Evict the least recently used key (right end), not the argument.
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print the cached keys from most to least recently used."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
    import doctest

    doctest.testmod()

# Demo: capacity 4, the least recently used key (2) is evicted by the sixth refer.
lru_cache = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()

print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
'''simple docstring'''
import numpy
# List of ((input features), output) pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
# parameter_vector[0] is the bias; the rest pair with the input features.
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Return hypothesis minus actual output for one example of the given set.

    Restores the name the call sites use; the previous definition collided
    with seven other `_A` defs and repeated a parameter name (SyntaxError).
    """
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set)
def _hypothesis_value(data_input_tuple):
    """Dot product of the example's features with the weights, plus the bias.

    NOTE(review): the loop stops at ``len(data_input_tuple) - 1`` so the last
    feature is skipped — preserved from the original; confirm intent upstream.
    """
    hyp_val = 0
    for i in range(len(data_input_tuple) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    """Return the actual output value for ``example_no`` of train or test data."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    """Return the hypothesis value for ``example_no`` of train or test data."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    """Sum the cost-derivative terms over the first ``end`` training examples.

    ``index == -1`` corresponds to the bias term (no feature multiplier).
    """
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    """Mean cost derivative for parameter ``index`` (-1 selects the bias)."""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    """Run batch gradient descent until the parameter vector converges.

    Mutates the module-level ``parameter_vector`` in place.
    """
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(('Number of iterations:', j))
def test_gradient_descent():
    """Print actual vs. predicted outputs for every test example."""
    for i in range(len(test_data)):
        print(('Actual output value:', output(i, 'test')))
        print(('Hypothesis output:', calculate_hypothesis_value(i, 'test')))
if __name__ == "__main__":
    # Fit the linear hypothesis on the training data, then report the
    # actual vs. predicted outputs for the held-out test examples.
    run_gradient_descent()
    print("""\nTesting gradient descent for a linear hypothesis function.\n""")
    test_gradient_descent()
| 166 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __SCREAMING_SNAKE_CASE (BaseImageProcessor ):
    """CLIP-style image processor.

    Optionally RGB-converts, resizes (shortest edge), center-crops, rescales
    and normalizes a batch of images into model-ready ``pixel_values``.

    Fixes applied: the base class and every in-body name referenced
    undefined placeholders, and each ``def`` repeated the parameter name
    ``__a`` (a SyntaxError); names are restored to match the imports and the
    values the bodies actually use.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: "PILImageResampling" = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 2_55,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 2_24}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        """Resize so the image's shortest edge matches ``size["shortest_edge"]``."""
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
        output_size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
        # `resize` here is the module-level transform, not this method.
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize with per-channel ``mean`` and ``std``."""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: "PILImageResampling" = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply the configured transforms to one image or a batch of images.

        Per-call arguments override the defaults stored on the instance.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name="size" , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 363 |
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class __SCREAMING_SNAKE_CASE (Pipeline ):
    """Visual-question-answering pipeline: answers a free-text question about an image.

    Fixes applied: the decorator argument and base class referenced undefined
    placeholders (the imports provide `PIPELINE_INIT_ARGS` and `Pipeline`),
    `_sanitize_parameters` repeated the parameter name `__a` (a SyntaxError)
    and discarded its values, and `postprocess` used `idalabel` instead of
    the model config's `id2label`.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args , **kwargs )
        # Only models with a visual-question-answering head are valid here.
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING )

    def _sanitize_parameters(self, padding=None, truncation=None, top_k=None, **kwargs):
        """Split kwargs into preprocess / forward / postprocess parameter dicts."""
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        """Answer ``question`` about ``image`` (or accept pre-built input dicts)."""
        if isinstance(image , (Image.Image, str) ) and isinstance(question , str ):
            inputs = {"image": image, "question": question}
        else:
            # The caller passed an iterable/dict of {"image", "question"} items.
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        """Tokenize the question and featurize the image into model inputs."""
        image = load_image(inputs["image"] )
        model_inputs = self.tokenizer(
            inputs["question"] , return_tensors=self.framework , padding=padding , truncation=truncation )
        image_features = self.image_processor(images=image , return_tensors=self.framework )
        model_inputs.update(image_features )
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs )
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """Return the ``top_k`` answers as [{"score", "answer"}, ...]."""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k )
        else:
            raise ValueError(f'Unsupported framework: {self.framework}' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 346 | 0 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    """Simulate a CUDA OOM failure for the batch-size-retry tests below.

    Restores the name every call site in this module uses.
    """
    raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
    """Tiny Linear -> BatchNorm1d -> Linear model for the memory-utility tests.

    Fixes applied: the class is instantiated as ``ModelForTest()`` below but
    was renamed to collide with the test-case class; ``__init__`` bound locals
    instead of ``self`` attributes; ``nn.BatchNormad`` does not exist
    (``nn.BatchNorm1d``); and ``forward`` called the same layer twice.
    """

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class _UpperCamelCase(unittest.TestCase):
    """Tests for `find_executable_batch_size` and `release_memory`.

    Fixes applied: method bodies referenced names (``batch_size``, ``cm``,
    exception classes) that their mangled signatures never bound, and the
    test methods were not named ``test_*`` so unittest never collected them.
    """

    def test_base_case(self):
        # The decorator halves the batch size on every fake OOM until it fits.
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_passed_args(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        # Passing batch_size explicitly is a usage error the decorator reports.
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        # Non-OOM errors must propagate unchanged instead of shrinking the batch.
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowerCamelCase :
    """Builds MaskFormerSwin configs and synthetic pixel inputs for the tests below.

    NOTE(review): ``__init__`` repeats the parameter name ``snake_case`` for every
    argument (duplicate parameter names are a SyntaxError), and the
    ``snake_case_ :... = ...`` lines bind throw-away locals rather than ``self``
    attributes, yet later methods read ``self.batch_size`` etc. — this file looks
    mechanically renamed; compare against the upstream transformers test.
    """

    def __init__( self: Dict , snake_case: Optional[Any] , snake_case: Tuple=13 , snake_case: Any=32 , snake_case: Union[str, Any]=2 , snake_case: Tuple=3 , snake_case: Union[str, Any]=16 , snake_case: Union[str, Any]=[1, 2, 1] , snake_case: Optional[Any]=[2, 2, 4] , snake_case: str=2 , snake_case: List[str]=2.0 , snake_case: Optional[int]=True , snake_case: Union[str, Any]=0.0 , snake_case: Optional[int]=0.0 , snake_case: Optional[Any]=0.1 , snake_case: List[str]="gelu" , snake_case: Any=False , snake_case: Optional[Any]=True , snake_case: Optional[int]=0.0_2 , snake_case: Any=1E-5 , snake_case: Optional[int]=True , snake_case: int=None , snake_case: Any=True , snake_case: str=10 , snake_case: Optional[Any]=8 , snake_case: Union[str, Any]=["stage1", "stage2", "stage3"] , snake_case: Tuple=[1, 2, 3] , ) -> Dict:
        # Record every test hyperparameter (see NOTE in the class docstring).
        snake_case_ :Dict = parent
        snake_case_ :List[Any] = batch_size
        snake_case_ :Dict = image_size
        snake_case_ :Dict = patch_size
        snake_case_ :Tuple = num_channels
        snake_case_ :List[Any] = embed_dim
        snake_case_ :List[str] = depths
        snake_case_ :str = num_heads
        snake_case_ :Tuple = window_size
        snake_case_ :Tuple = mlp_ratio
        snake_case_ :int = qkv_bias
        snake_case_ :Tuple = hidden_dropout_prob
        snake_case_ :Optional[Any] = attention_probs_dropout_prob
        snake_case_ :Dict = drop_path_rate
        snake_case_ :Any = hidden_act
        snake_case_ :Any = use_absolute_embeddings
        snake_case_ :int = patch_norm
        snake_case_ :List[Any] = layer_norm_eps
        snake_case_ :Tuple = initializer_range
        snake_case_ :str = is_training
        snake_case_ :int = scope
        snake_case_ :Tuple = use_labels
        snake_case_ :Tuple = type_sequence_label_size
        snake_case_ :str = encoder_stride
        snake_case_ :List[Any] = out_features
        snake_case_ :str = out_indices

    def lowerCAmelCase_ ( self: Tuple ) -> Dict:
        # Random pixel batch plus (optionally) random labels and a fresh config.
        snake_case_ :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        snake_case_ :str = None
        if self.use_labels:
            snake_case_ :Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        snake_case_ :Union[str, Any] = self.get_config()
        return config, pixel_values, labels

    def lowerCAmelCase_ ( self: int ) -> Optional[Any]:
        # Build a MaskFormerSwinConfig from the stored hyperparameters.
        return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )

    def lowerCAmelCase_ ( self: List[Any] , snake_case: str , snake_case: int , snake_case: List[str] ) -> Any:
        # Forward the base model and check the pooled sequence output shape.
        snake_case_ :Dict = MaskFormerSwinModel(config=snake_case )
        model.to(snake_case )
        model.eval()
        snake_case_ :Tuple = model(snake_case )
        snake_case_ :Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        snake_case_ :Any = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )

    def lowerCAmelCase_ ( self: Optional[Any] , snake_case: int , snake_case: List[str] , snake_case: Tuple ) -> Union[str, Any]:
        # Forward the backbone variant and verify feature maps and channels.
        snake_case_ :Any = MaskFormerSwinBackbone(config=snake_case )
        model.to(snake_case )
        model.eval()
        snake_case_ :Optional[Any] = model(snake_case )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , [16, 32, 64] )
        # verify ValueError
        with self.parent.assertRaises(snake_case ):
            snake_case_ :Optional[Any] = ["""stem"""]
            snake_case_ :str = MaskFormerSwinBackbone(config=snake_case )

    def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]:
        # Package config and inputs in the dict form the common tests expect.
        snake_case_ :Optional[int] = self.prepare_config_and_inputs()
        snake_case_, snake_case_, snake_case_ :str = config_and_inputs
        snake_case_ :Tuple = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """Model/backbone tests for MaskFormerSwin.

    NOTE(review): the mixin bases are both the undefined placeholder
    ``_lowerCAmelCase`` (presumably ModelTesterMixin / PipelineTesterMixin from
    the imports above), every class attribute is named ``_A`` (later ones
    shadow earlier ones), and ``MaskFormerSwinModelTester`` is referenced but
    the tester class above is named ``lowerCamelCase`` — mechanical renaming
    damage; verify against the upstream transformers test before trusting.
    """
    _A : Union[str, Any] = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    _A : str = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
    _A : List[str] = False
    _A : Any = False
    _A : Dict = False
    _A : List[Any] = False
    _A : Optional[int] = False

    def lowerCAmelCase_ ( self: Dict ) -> Any:
        # Shared tester + config tester for the common test mixins.
        snake_case_ :str = MaskFormerSwinModelTester(self )
        snake_case_ :Optional[Any] = ConfigTester(self , config_class=snake_case , embed_dim=37 )

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
            """ `nn.DataParallel`"""
        ) )
    def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]:
        pass

    def lowerCAmelCase_ ( self: Union[str, Any] ) -> Dict:
        # Run the standard battery of config round-trip checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def lowerCAmelCase_ ( self: Any ) -> Tuple:
        return

    def lowerCAmelCase_ ( self: Any ) -> Any:
        snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case )

    def lowerCAmelCase_ ( self: Union[str, Any] ) -> int:
        snake_case_ :Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*snake_case )

    @unittest.skip("""Swin does not use inputs_embeds""" )
    def lowerCAmelCase_ ( self: str ) -> List[str]:
        pass

    @unittest.skip("""Swin does not support feedforward chunking""" )
    def lowerCAmelCase_ ( self: int ) -> Optional[int]:
        pass

    def lowerCAmelCase_ ( self: List[str] ) -> List[Any]:
        # Input embeddings should be a module; output embeddings absent or Linear.
        snake_case_, snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ :str = model_class(snake_case )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            snake_case_ :Dict = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )

    def lowerCAmelCase_ ( self: Tuple ) -> Dict:
        # The forward signature must start with `pixel_values`.
        snake_case_, snake_case_ :int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ :Optional[int] = model_class(snake_case )
            snake_case_ :str = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case_ :str = [*signature.parameters.keys()]
            snake_case_ :str = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , snake_case )

    @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
    def lowerCAmelCase_ ( self: List[Any] ) -> List[Any]:
        pass

    @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
    def lowerCAmelCase_ ( self: Dict ) -> List[Any]:
        pass

    def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: Union[str, Any] , snake_case: int , snake_case: Any , snake_case: List[str] ) -> str:
        # Helper: run a forward pass and verify hidden-state count and shapes.
        snake_case_ :List[str] = model_class(snake_case )
        model.to(snake_case )
        model.eval()
        with torch.no_grad():
            snake_case_ :List[Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
        snake_case_ :Any = outputs.hidden_states
        snake_case_ :Optional[int] = getattr(
            self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(snake_case ) , snake_case )
        # Swin has a different seq_length
        snake_case_ :str = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        snake_case_ :int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )

    def lowerCAmelCase_ ( self: List[Any] ) -> Optional[int]:
        # Hidden states must be returned both via kwarg and via config flag.
        snake_case_, snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ :List[Any] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            snake_case_ :Tuple = True
            self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            snake_case_ :List[Any] = True
            self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case )

    def lowerCAmelCase_ ( self: Optional[Any] ) -> Tuple:
        # Same check with a padded (non-divisible) input resolution.
        snake_case_, snake_case_ :int = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ :List[Any] = 3
        snake_case_ :List[Any] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        snake_case_ :Any = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        snake_case_ :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        snake_case_ :List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            snake_case_ :str = True
            self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            snake_case_ :Any = True
            self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) )

    @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
    def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[str]:
        pass

    @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def lowerCAmelCase_ ( self: List[str] ) -> str:
        pass

    @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def lowerCAmelCase_ ( self: str ) -> List[Any]:
        pass

    def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
        # Tuple outputs and dict outputs must be numerically equivalent.
        snake_case_, snake_case_ :Dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(snake_case: str ):
            snake_case_ :Optional[int] = 0
            return t

        def check_equivalence(snake_case: List[Any] , snake_case: Union[str, Any] , snake_case: int , snake_case: Tuple={} ):
            with torch.no_grad():
                snake_case_ :List[Any] = model(**snake_case , return_dict=snake_case , **snake_case )
                snake_case_ :Any = model(**snake_case , return_dict=snake_case , **snake_case ).to_tuple()

                def recursive_check(snake_case: List[Any] , snake_case: int ):
                    if isinstance(snake_case , (List, Tuple) ):
                        for tuple_iterable_value, dict_iterable_value in zip(snake_case , snake_case ):
                            recursive_check(snake_case , snake_case )
                    elif isinstance(snake_case , snake_case ):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values() , dict_object.values() ):
                            recursive_check(snake_case , snake_case )
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(snake_case ) , set_nan_tensor_to_zero(snake_case ) , atol=1E-5 ) , msg=(
                                """Tuple and dict output are not equal. Difference:"""
                                f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
                                f""" {torch.isnan(snake_case ).any()} and `inf`: {torch.isinf(snake_case )}. Dict has"""
                                f""" `nan`: {torch.isnan(snake_case ).any()} and `inf`: {torch.isinf(snake_case )}."""
                            ) , )

                recursive_check(snake_case , snake_case )

        for model_class in self.all_model_classes:
            snake_case_ :int = model_class(snake_case )
            model.to(snake_case )
            model.eval()
            snake_case_ :Any = self._prepare_for_class(snake_case , snake_case )
            snake_case_ :List[Any] = self._prepare_for_class(snake_case , snake_case )
            check_equivalence(snake_case , snake_case , snake_case )
            snake_case_ :Tuple = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
            snake_case_ :Dict = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
            check_equivalence(snake_case , snake_case , snake_case )
            snake_case_ :Tuple = self._prepare_for_class(snake_case , snake_case )
            snake_case_ :Any = self._prepare_for_class(snake_case , snake_case )
            check_equivalence(snake_case , snake_case , snake_case , {"""output_hidden_states""": True} )
            snake_case_ :Dict = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
            snake_case_ :List[str] = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
            check_equivalence(snake_case , snake_case , snake_case , {"""output_hidden_states""": True} )
@require_torch
class lowerCamelCase ( unittest.TestCase , _lowerCAmelCase ):
    """Backbone-specific tests for MaskFormerSwinBackbone.

    NOTE(review): the mixin base is the undefined placeholder ``_lowerCAmelCase``
    (presumably BackboneTesterMixin from the imports above) and
    ``MaskFormerSwinModelTester`` is referenced while the tester class in this
    file is named ``lowerCamelCase`` — mechanical renaming damage; verify
    against the upstream transformers test.
    """
    _A : int = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    _A : Tuple = MaskFormerSwinConfig

    def lowerCAmelCase_ ( self: List[str] ) -> Optional[int]:
        snake_case_ :Optional[Any] = MaskFormerSwinModelTester(self )

    def lowerCAmelCase_ ( self: int ) -> Optional[int]:
        # Forward each backbone class and verify feature maps, hidden states
        # and (if applicable) attentions.
        snake_case_, snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ :Tuple = inputs_dict["""pixel_values"""].shape[0]
        for backbone_class in self.all_model_classes:
            snake_case_ :List[str] = backbone_class(snake_case )
            backbone.to(snake_case )
            backbone.eval()
            snake_case_ :List[Any] = backbone(**snake_case )
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , snake_case )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            snake_case_ :Union[str, Any] = backbone(**snake_case , output_hidden_states=snake_case )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    snake_case_, snake_case_, snake_case_ :List[Any] = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                snake_case_ :List[Any] = backbone(**snake_case , output_attentions=snake_case )
                self.assertIsNotNone(outputs.attentions )
| 66 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Bug fix: the import structure was bound to throwaway names, the modeling
# list was never attached to it, and the `_LazyModule` result was discarded
# instead of installed in `sys.modules` — restore the canonical lazy-module
# pattern the final line depends on (`_import_structure` was undefined).
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 357 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract selected warnings from one artifact (a zip file, or a directory
    when run from GitHub Actions output).

    Restores the name the caller below uses; the previous definition repeated
    a parameter name (SyntaxError) and collided with another `_A` def.
    NOTE(review): relies on a module-level `from_gh` flag set in `__main__`.
    """
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        # Accumulate indented continuation lines into one warning record.
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f''': {x}: ''' in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' )

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Collect the selected warnings from every artifact under ``artifact_dir``.

    Args:
        artifact_dir: Directory holding the downloaded artifacts.
        targets: Warning class names to keep.

    Returns:
        The union of matching warning texts across all artifacts.
    """
    selected_warnings = set()
    # Zip archives normally; when artifacts were fetched by a GitHub action
    # (`from_gh` flag set in __main__) every entry is kept regardless of suffix.
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
if __name__ == "__main__":
def _A ( SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
return values.split("," )
UpperCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
UpperCAmelCase : List[Any] = parser.parse_args()
UpperCAmelCase : str = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
UpperCAmelCase : Dict = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
UpperCAmelCase : Tuple = extract_warnings(args.output_dir, args.targets)
UpperCAmelCase : Optional[Any] = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 148 | 0 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _UpperCAmelCase ( _A ):
    """Audio-diffusion pipeline: denoises mel-spectrogram images with a UNet
    (optionally through a VQ-VAE latent space) and converts them to audio via
    the `Mel` helper.

    NOTE(review): identifiers in this file look machine-obfuscated — the base
    class `_A`, the repeated parameter name `A` (a syntax error as written) and
    the method name `A` are placeholders. Confirm every name against the
    upstream diffusers `AudioDiffusionPipeline` before relying on this text.
    """

    # Sub-modules that may legitimately be absent from the pipeline.
    SCREAMING_SNAKE_CASE_ : Optional[int] = ["vqvae"]
    def __init__( self : Dict , A : AutoencoderKL , A : UNetaDConditionModel , A : Mel , A : Union[DDIMScheduler, DDPMScheduler] , ) -> List[str]:
        """Register the unet/scheduler/mel/vqvae sub-modules on the pipeline.

        NOTE(review): the duplicated parameter name `A` makes the argument-to-
        module mapping unverifiable from this source.
        """
        super().__init__()
        self.register_modules(unet=A , scheduler=A , mel=A , vqvae=A )
    def A ( self : List[str] ) -> int:
        """Default inference steps: 50 for one scheduler family, 1000 otherwise.

        NOTE(review): the scheduler type checked by `isinstance` is obscured
        by the placeholder `A` — presumably DDIM vs DDPM; confirm upstream.
        """
        return 50 if isinstance(self.scheduler , A ) else 10_00
    @torch.no_grad()
    def __call__( self : List[str] , A : int = 1 , A : str = None , A : np.ndarray = None , A : int = 0 , A : int = 0 , A : int = None , A : torch.Generator = None , A : float = 0 , A : float = 0 , A : torch.Generator = None , A : float = 0 , A : torch.Tensor = None , A : torch.Tensor = None , A : Tuple=True , ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        """Run the denoising loop and return generated images plus decoded audio.

        Supports optional conditioning on an input audio slice (audio_file /
        raw_audio), partial denoising from `start_step`, and time-masking of
        the left/right edges of the spectrogram.
        """
        lowercase_ : Dict = steps or self.get_default_steps()
        self.scheduler.set_timesteps(A )
        lowercase_ : List[Any] = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size ) == int:
            lowercase_ : List[Any] = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            lowercase_ : int = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) , generator=A , device=self.device , )
        lowercase_ : Dict = noise
        lowercase_ : List[Any] = None
        if audio_file is not None or raw_audio is not None:
            # Convert the conditioning audio into a spectrogram image tensor in [-1, 1].
            self.mel.load_audio(A , A )
            lowercase_ : int = self.mel.audio_slice_to_image(A )
            lowercase_ : Tuple = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
                (input_image.height, input_image.width) )
            lowercase_ : Any = (input_image / 2_55) * 2 - 1
            lowercase_ : int = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
            if self.vqvae is not None:
                # Move the conditioning image into the VQ-VAE latent space.
                lowercase_ : str = self.vqvae.encode(torch.unsqueeze(A , 0 ) ).latent_dist.sample(
                    generator=A )[0]
                lowercase_ : Optional[Any] = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                lowercase_ : Any = self.scheduler.add_noise(A , A , self.scheduler.timesteps[start_step - 1] )
            # Pixels-per-second of the spectrogram, used to convert mask seconds to columns.
            lowercase_ : Optional[Any] = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            lowercase_ : Optional[Any] = int(mask_start_secs * pixels_per_second )
            lowercase_ : int = int(mask_end_secs * pixels_per_second )
            lowercase_ : Union[str, Any] = self.scheduler.add_noise(A , A , torch.tensor(self.scheduler.timesteps[start_step:] ) )
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet , A ):
                lowercase_ : Optional[int] = self.unet(A , A , A )['''sample''']
            else:
                lowercase_ : int = self.unet(A , A )['''sample''']
            if isinstance(self.scheduler , A ):
                lowercase_ : List[Any] = self.scheduler.step(
                    model_output=A , timestep=A , sample=A , eta=A , generator=A , )['''prev_sample''']
            else:
                lowercase_ : int = self.scheduler.step(
                    model_output=A , timestep=A , sample=A , generator=A , )['''prev_sample''']
            if mask is not None:
                # Re-impose the (noised) conditioning audio on the masked edge columns.
                if mask_start > 0:
                    lowercase_ : List[Any] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    lowercase_ : List[str] = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            lowercase_ : Optional[Any] = 1 / self.vqvae.config.scaling_factor * images
            lowercase_ : str = self.vqvae.decode(A )['''sample''']
        # Map [-1, 1] tensors back to uint8 grayscale/RGB PIL images.
        lowercase_ : Tuple = (images / 2 + 0.5).clamp(0 , 1 )
        lowercase_ : Optional[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        lowercase_ : List[str] = (images * 2_55).round().astype('''uint8''' )
        lowercase_ : Optional[int] = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(A , mode='''RGB''' ).convert('''L''' ) for _ in images) )
        lowercase_ : Any = [self.mel.image_to_audio(A ) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(A )[:, np.newaxis, :] ) , **ImagePipelineOutput(A ) )
    @torch.no_grad()
    def A ( self : List[str] , A : List[Image.Image] , A : int = 50 ) -> np.ndarray:
        """Invert images back to noise by running DDIM in reverse (encode).

        Requires a DDIM-family scheduler (asserted via the placeholder `A`).
        """
        assert isinstance(self.scheduler , A )
        self.scheduler.set_timesteps(A )
        lowercase_ : List[str] = np.array(
            [np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
        lowercase_ : Union[str, Any] = (sample / 2_55) * 2 - 1
        lowercase_ : Union[str, Any] = torch.Tensor(A ).to(self.device )
        # Walk the timesteps in reverse, undoing each DDIM update step.
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
            lowercase_ : Union[str, Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            lowercase_ : Dict = self.scheduler.alphas_cumprod[t]
            lowercase_ : int = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            lowercase_ : str = 1 - alpha_prod_t
            lowercase_ : Dict = self.unet(A , A )['''sample''']
            lowercase_ : List[Any] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            lowercase_ : List[str] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            lowercase_ : int = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
    @staticmethod
    def A ( A : torch.Tensor , A : torch.Tensor , A : float ) -> torch.Tensor:
        """Spherical linear interpolation between two flattened tensors.

        NOTE(review): both tensor parameters share the placeholder name `A`;
        the alpha/xa naming below cannot be verified from this source.
        """
        lowercase_ : List[str] = acos(torch.dot(torch.flatten(A ) , torch.flatten(A ) ) / torch.norm(A ) / torch.norm(A ) )
        return sin((1 - alpha) * theta ) * xa / sin(A ) + sin(alpha * theta ) * xa / sin(A )
| 33 |
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
__A : str = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
__A : str = parser.parse_args()
__A : List[Any] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
__A : Dict = CLIPImageProcessor()
__A : Union[str, Any] = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
__A : List[str] = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 33 | 1 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    """Convert an image (or list of images) into a float tensor in [-1, 1].

    Args:
        image: A ``torch.Tensor`` (returned unchanged), a ``PIL.Image.Image``,
            or a list of either.
        w: Target width used when resizing PIL inputs.
        h: Target height used when resizing PIL inputs.

    Returns:
        A ``torch.Tensor`` of shape (batch, channels, height, width).
    """
    if isinstance(image, torch.Tensor):
        # Tensors are assumed to be already preprocessed by the caller.
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        # Resize, stack to (B, H, W, C), scale to [-1, 1], and move channels first.
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherically interpolate between two vectors/tensors.

    Args:
        t: Interpolation factor in [0, 1].
        v0: Start vector (``np.ndarray`` or ``torch.Tensor``).
        v1: End vector of the same shape and type as ``v0``.
        DOT_THRESHOLD: Above this (near-parallel) normalized dot product,
            fall back to linear interpolation to avoid a near-zero sine.

    Returns:
        The interpolated vector, same type (and device, for torch) as inputs.
    """
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # Vectors are almost colinear: plain lerp is numerically stable.
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
def spherical_dist_loss(x, y):
    """Angular distance loss between direction vectors.

    Both inputs are L2-normalized along the last dim, so only direction
    matters. Computes ``2 * arcsin(||x - y|| / 2) ** 2`` per row, i.e. a
    squared half-angle between the unit vectors.
    """
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    # chord length ||x - y|| = 2 sin(theta / 2) for unit vectors
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    """Enable/disable gradient tracking for every parameter of ``model``."""
    for param in model.parameters():
        param.requires_grad = value
class __magic_name__ ( UpperCamelCase__ ):
    """CLIP-guided diffusion pipeline that mixes a content and a style image.

    Combines a Stable-Diffusion VAE/UNet with a CLIP model used for guidance
    and an optional CoCa captioner used to auto-generate prompts from the
    input images when none are provided.

    NOTE(review): this file appears machine-obfuscated — the base class name
    `UpperCamelCase__` and the repeated parameter name `snake_case` (a syntax
    error as written) are placeholders, and most local bindings are discarded
    into throwaway names. Confirm every identifier against the upstream
    diffusers community pipeline before relying on this text.
    """
    def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None , ) -> List[Any]:
        """Register all sub-modules and freeze the text encoder and CLIP model.

        NOTE(review): duplicated `snake_case` parameters make the mapping of
        arguments to modules unverifiable from this source.
        """
        super().__init__()
        self.register_modules(
            vae=snake_case , text_encoder=snake_case , clip_model=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , feature_extractor=snake_case , coca_model=snake_case , coca_tokenizer=snake_case , coca_transform=snake_case , )
        # `feature_extractor.size` may be a plain int or a dict with "shortest_edge".
        _UpperCAmelCase : List[Any] =(
            feature_extractor.size
            if isinstance(feature_extractor.size , snake_case)
            else feature_extractor.size['shortest_edge']
        )
        _UpperCAmelCase : str =transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std)
        # Guidance models are only scored against, never trained here.
        set_requires_grad(self.text_encoder , snake_case)
        set_requires_grad(self.clip_model , snake_case)
    def lowerCAmelCase ( self , snake_case = "auto") -> List[Any]:
        """Enable sliced attention; "auto" uses half the attention head dim."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            _UpperCAmelCase : Union[str, Any] =self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(snake_case)
    def lowerCAmelCase ( self) -> int:
        """Disable attention slicing by delegating to the enable method.

        NOTE(review): upstream passes None here; the placeholder obscures it.
        """
        self.enable_attention_slicing(snake_case)
    def lowerCAmelCase ( self) -> Tuple:
        """Toggle gradient tracking on the VAE (freeze/unfreeze pair below)."""
        set_requires_grad(self.vae , snake_case)
    def lowerCAmelCase ( self) -> Union[str, Any]:
        """Counterpart of the method above for the VAE.

        NOTE(review): the True/False argument is hidden by the placeholder.
        """
        set_requires_grad(self.vae , snake_case)
    def lowerCAmelCase ( self) -> List[Any]:
        """Toggle gradient tracking on the UNet (freeze/unfreeze pair below)."""
        set_requires_grad(self.unet , snake_case)
    def lowerCAmelCase ( self) -> Optional[Any]:
        """Counterpart of the method above for the UNet."""
        set_requires_grad(self.unet , snake_case)
    def lowerCAmelCase ( self , snake_case , snake_case , snake_case) -> Tuple:
        """Compute the truncated timestep schedule for img2img-style denoising.

        Skips the first ``num_inference_steps * (1 - strength)`` steps and
        returns (timesteps, effective number of steps).
        """
        _UpperCAmelCase : Union[str, Any] =min(int(num_inference_steps * strength) , snake_case)
        _UpperCAmelCase : Any =max(num_inference_steps - init_timestep , 0)
        _UpperCAmelCase : int =self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=None) -> Optional[int]:
        """Encode an image tensor into scaled, noised VAE latents.

        Raises:
            ValueError: if ``image`` is not a ``torch.Tensor``.
        """
        if not isinstance(snake_case , torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(snake_case)}")
        _UpperCAmelCase : str =image.to(device=snake_case , dtype=snake_case)
        if isinstance(snake_case , snake_case):
            # Per-sample generators: encode each image with its own generator.
            _UpperCAmelCase : Optional[Any] =[
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(snake_case)
            ]
            _UpperCAmelCase : Tuple =torch.cat(snake_case , dim=0)
        else:
            _UpperCAmelCase : List[Any] =self.vae.encode(snake_case).latent_dist.sample(snake_case)
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        _UpperCAmelCase : Optional[int] =0.1_82_15 * init_latents
        _UpperCAmelCase : List[str] =init_latents.repeat_interleave(snake_case , dim=0)
        _UpperCAmelCase : Union[str, Any] =randn_tensor(init_latents.shape , generator=snake_case , device=snake_case , dtype=snake_case)
        # get latents
        _UpperCAmelCase : Optional[int] =self.scheduler.add_noise(snake_case , snake_case , snake_case)
        _UpperCAmelCase : List[Any] =init_latents
        return latents
    def lowerCAmelCase ( self , snake_case) -> List[Any]:
        """Caption an image with the CoCa model and return the cleaned text."""
        _UpperCAmelCase : Union[str, Any] =self.coca_transform(snake_case).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            _UpperCAmelCase : str =self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype))
        _UpperCAmelCase : Tuple =self.coca_tokenizer.decode(generated[0].cpu().numpy())
        # Strip CoCa's sentinel tokens and trailing punctuation.
        return generated.split('<end_of_text>')[0].replace('<start_of_text>' , '').rstrip(' .,')
    def lowerCAmelCase ( self , snake_case , snake_case) -> Optional[int]:
        """Embed an image with CLIP and L2-normalize, repeated per prompt."""
        _UpperCAmelCase : Any =self.feature_extractor.preprocess(snake_case)
        _UpperCAmelCase : Optional[Any] =torch.from_numpy(clip_image_input['pixel_values'][0]).unsqueeze(0).to(self.device).half()
        _UpperCAmelCase : Dict =self.clip_model.get_image_features(snake_case)
        _UpperCAmelCase : int =image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=snake_case)
        _UpperCAmelCase : List[str] =image_embeddings_clip.repeat_interleave(snake_case , dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ) -> Optional[Any]:
        """CLIP-guidance step: nudge the noise prediction toward the target
        CLIP embedding by backpropagating a spherical distance loss through
        the decoded image.

        Returns:
            (adjusted noise prediction, possibly adjusted latents).
        """
        _UpperCAmelCase : Dict =latents.detach().requires_grad_()
        _UpperCAmelCase : str =self.scheduler.scale_model_input(snake_case , snake_case)
        # predict the noise residual
        _UpperCAmelCase : int =self.unet(snake_case , snake_case , encoder_hidden_states=snake_case).sample
        if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            _UpperCAmelCase : Optional[int] =self.scheduler.alphas_cumprod[timestep]
            _UpperCAmelCase : Any =1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            _UpperCAmelCase : str =(latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            _UpperCAmelCase : Union[str, Any] =torch.sqrt(snake_case)
            _UpperCAmelCase : List[str] =pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler , snake_case):
            # Sigma-based schedulers (e.g. LMS) predict x_0 via sigma scaling.
            _UpperCAmelCase : Optional[int] =self.scheduler.sigmas[index]
            _UpperCAmelCase : Tuple =latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        _UpperCAmelCase : Tuple =1 / 0.1_82_15 * sample
        _UpperCAmelCase : Optional[Any] =self.vae.decode(snake_case).sample
        _UpperCAmelCase : Tuple =(image / 2 + 0.5).clamp(0 , 1)
        _UpperCAmelCase : int =transforms.Resize(self.feature_extractor_size)(snake_case)
        _UpperCAmelCase : Optional[int] =self.normalize(snake_case).to(latents.dtype)
        _UpperCAmelCase : str =self.clip_model.get_image_features(snake_case)
        _UpperCAmelCase : str =image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=snake_case)
        _UpperCAmelCase : Optional[int] =spherical_dist_loss(snake_case , snake_case).mean() * clip_guidance_scale
        # Gradient of the CLIP loss w.r.t. the latents, applied with a minus sign.
        _UpperCAmelCase : List[str] =-torch.autograd.grad(snake_case , snake_case)[0]
        if isinstance(self.scheduler , snake_case):
            _UpperCAmelCase : Optional[Any] =latents.detach() + grads * (sigma**2)
            _UpperCAmelCase : str =noise_pred_original
        else:
            _UpperCAmelCase : str =noise_pred_original - torch.sqrt(snake_case) * grads
        return noise_pred, latents
    @torch.no_grad()
    def __call__( self , snake_case , snake_case , snake_case = None , snake_case = None , snake_case = 5_1_2 , snake_case = 5_1_2 , snake_case = 0.6 , snake_case = 5_0 , snake_case = 7.5 , snake_case = 1 , snake_case = 0.0 , snake_case = 1_0_0 , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = 0.8 , snake_case = 0.1 , snake_case = 0.1 , ) -> List[str]:
        """Generate an image mixing a content and a style image.

        Captions missing prompts with CoCa, slerps the content/style text and
        CLIP image embeddings, then runs classifier-free-guided denoising with
        optional CLIP guidance, decoding the final latents through the VAE.

        Raises:
            ValueError: on generator/batch mismatch, non-multiple-of-8 sizes,
                missing prompts without a CoCa model, or bad latent shapes.
        """
        if isinstance(snake_case , snake_case) and len(snake_case) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(snake_case)} generators.")
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
        if isinstance(snake_case , torch.Generator) and batch_size > 1:
            _UpperCAmelCase : List[str] =[generator] + [None] * (batch_size - 1)
        # Track which CoCa pieces are missing so we can report them precisely.
        _UpperCAmelCase : Tuple =[
            ('model', self.coca_model is None),
            ('tokenizer', self.coca_tokenizer is None),
            ('transform', self.coca_transform is None),
        ]
        _UpperCAmelCase : Tuple =[x[0] for x in coca_is_none if x[1]]
        _UpperCAmelCase : Union[str, Any] =', '.join(snake_case)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(snake_case):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.")
            _UpperCAmelCase : Optional[int] =self.get_image_description(snake_case)
        if style_prompt is None:
            if len(snake_case):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.")
            _UpperCAmelCase : List[str] =self.get_image_description(snake_case)
        # get prompt text embeddings for content and style
        _UpperCAmelCase : Optional[Any] =self.tokenizer(
            snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=snake_case , return_tensors='pt' , )
        _UpperCAmelCase : Dict =self.text_encoder(content_text_input.input_ids.to(self.device))[0]
        _UpperCAmelCase : Optional[int] =self.tokenizer(
            snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=snake_case , return_tensors='pt' , )
        _UpperCAmelCase : Tuple =self.text_encoder(style_text_input.input_ids.to(self.device))[0]
        # Spherically interpolate between the content and style text embeddings.
        _UpperCAmelCase : List[Any] =slerp(snake_case , snake_case , snake_case)
        # duplicate text embeddings for each generation per prompt
        _UpperCAmelCase : Optional[Any] =text_embeddings.repeat_interleave(snake_case , dim=0)
        # set timesteps
        _UpperCAmelCase : Any ='offset' in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        _UpperCAmelCase : int ={}
        if accepts_offset:
            _UpperCAmelCase : Union[str, Any] =1
        self.scheduler.set_timesteps(snake_case , **snake_case)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)
        _UpperCAmelCase : int =self.get_timesteps(snake_case , snake_case , self.device)
        _UpperCAmelCase : Dict =timesteps[:1].repeat(snake_case)
        # Preprocess image
        _UpperCAmelCase : int =preprocess(snake_case , snake_case , snake_case)
        _UpperCAmelCase : Tuple =self.prepare_latents(
            snake_case , snake_case , snake_case , text_embeddings.dtype , self.device , snake_case)
        _UpperCAmelCase : Optional[Any] =preprocess(snake_case , snake_case , snake_case)
        _UpperCAmelCase : List[Any] =self.prepare_latents(
            snake_case , snake_case , snake_case , text_embeddings.dtype , self.device , snake_case)
        # Mix the content and style latents before denoising.
        _UpperCAmelCase : List[Any] =slerp(snake_case , snake_case , snake_case)
        if clip_guidance_scale > 0:
            _UpperCAmelCase : Optional[int] =self.get_clip_image_embeddings(snake_case , snake_case)
            _UpperCAmelCase : int =self.get_clip_image_embeddings(snake_case , snake_case)
            _UpperCAmelCase : Dict =slerp(
                snake_case , snake_case , snake_case)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        _UpperCAmelCase : int =guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            _UpperCAmelCase : Union[str, Any] =content_text_input.input_ids.shape[-1]
            _UpperCAmelCase : List[str] =self.tokenizer([''] , padding='max_length' , max_length=snake_case , return_tensors='pt')
            _UpperCAmelCase : Union[str, Any] =self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            _UpperCAmelCase : List[Any] =uncond_embeddings.repeat_interleave(snake_case , dim=0)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            _UpperCAmelCase : Any =torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        _UpperCAmelCase : str =(batch_size, self.unet.config.in_channels, height // 8, width // 8)
        _UpperCAmelCase : Union[str, Any] =text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                _UpperCAmelCase : int =torch.randn(snake_case , generator=snake_case , device='cpu' , dtype=snake_case).to(
                    self.device)
            else:
                _UpperCAmelCase : Optional[int] =torch.randn(snake_case , generator=snake_case , device=self.device , dtype=snake_case)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            _UpperCAmelCase : List[str] =latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        _UpperCAmelCase : str =latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        _UpperCAmelCase : List[str] ='eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        _UpperCAmelCase : Union[str, Any] ={}
        if accepts_eta:
            _UpperCAmelCase : Optional[int] =eta
        # check if the scheduler accepts generator
        _UpperCAmelCase : Union[str, Any] ='generator' in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            _UpperCAmelCase : Dict =generator
        with self.progress_bar(total=snake_case):
            for i, t in enumerate(snake_case):
                # expand the latents if we are doing classifier free guidance
                _UpperCAmelCase : Dict =torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                _UpperCAmelCase : Optional[int] =self.scheduler.scale_model_input(snake_case , snake_case)
                # predict the noise residual
                _UpperCAmelCase : Optional[int] =self.unet(snake_case , snake_case , encoder_hidden_states=snake_case).sample
                # perform classifier free guidance
                if do_classifier_free_guidance:
                    _UpperCAmelCase : int =noise_pred.chunk(2)
                    _UpperCAmelCase : Dict =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                # perform clip guidance
                if clip_guidance_scale > 0:
                    _UpperCAmelCase : Tuple =(
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    _UpperCAmelCase : Optional[int] =self.cond_fn(
                        snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , )
                # compute the previous noisy sample x_t -> x_t-1
                _UpperCAmelCase : List[str] =self.scheduler.step(snake_case , snake_case , snake_case , **snake_case).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        _UpperCAmelCase : Optional[Any] =1 / 0.1_82_15 * latents
        _UpperCAmelCase : Optional[int] =self.vae.decode(snake_case).sample
        _UpperCAmelCase : str =(image / 2 + 0.5).clamp(0 , 1)
        _UpperCAmelCase : Union[str, Any] =image.cpu().permute(0 , 2 , 3 , 1).numpy()
        if output_type == "pil":
            _UpperCAmelCase : List[str] =self.numpy_to_pil(snake_case)
        if not return_dict:
            return (image, None)
        return StableDiffusionPipelineOutput(images=snake_case , nsfw_content_detected=snake_case)
| 353 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowercase =logging.get_logger(__name__) # pylint: disable=invalid-name
class __magic_name__ ( lowerCAmelCase ,lowerCAmelCase ):
    """Learnable (or null) classifier-free-guidance embeddings for VQ-Diffusion.

    When ``learnable`` is true, a zero-initialized embedding table is created
    and wrapped in a ``torch.nn.Parameter`` so it can be trained.

    NOTE(review): identifiers here look machine-obfuscated — the repeated
    `snake_case` parameter name is a syntax error as written, and the local
    assignments bind to throwaway names while `learnable`, `hidden_size`, and
    `length` are read without a visible binding. Confirm against the upstream
    `LearnedClassifierFreeSamplingEmbeddings` class in diffusers.
    """
    @register_to_config
    def __init__( self , snake_case , snake_case = None , snake_case = None) -> Union[str, Any]:
        """Create the (optional) zero-initialized embedding Parameter."""
        super().__init__()
        _UpperCAmelCase : List[Any] =learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            # Zero init: the "null" prompt embedding is learned from scratch.
            _UpperCAmelCase : str =torch.zeros(snake_case , snake_case)
        else:
            _UpperCAmelCase : Tuple =None
        _UpperCAmelCase : int =torch.nn.Parameter(snake_case)
class __magic_name__ ( lowerCAmelCase ):
    """Pipeline for text-to-image generation in the VQ-Diffusion style.

    NOTE(review): this file is machine-obfuscated. Every repeated ``snake_case``
    parameter below originally had a distinct name (as written, the duplicated
    parameter names are a SyntaxError), the ``_UpperCAmelCase`` locals were real
    assignments (e.g. ``text_inputs``, ``prompt_embeds``, ``sample``), and the
    ``UpperCAmelCase =42`` lines were typed attribute declarations for the six
    registered modules (vqvae, transformer, text_encoder, tokenizer, scheduler,
    learned_classifier_free_sampling_embeddings). Restore against the upstream
    VQ-Diffusion pipeline before relying on this code.
    """

    # Placeholders for the six registered module attributes (see class note).
    UpperCAmelCase =42
    UpperCAmelCase =42
    UpperCAmelCase =42
    UpperCAmelCase =42
    UpperCAmelCase =42
    UpperCAmelCase =42

    def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ) -> Tuple:
        """Register the vqvae/transformer/text-encoder/tokenizer/scheduler modules."""
        super().__init__()
        self.register_modules(
            vqvae=snake_case , transformer=snake_case , text_encoder=snake_case , tokenizer=snake_case , scheduler=snake_case , learned_classifier_free_sampling_embeddings=snake_case , )

    def lowerCAmelCase ( self , snake_case , snake_case , snake_case) -> List[str]:
        """Encode the prompt with CLIP, normalise the embeddings, and (when
        classifier-free guidance is on) prepend the unconditional embeddings."""
        _UpperCAmelCase : int =len(snake_case) if isinstance(snake_case , snake_case) else 1
        # get prompt text embeddings
        _UpperCAmelCase : Optional[Any] =self.tokenizer(
            snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
        _UpperCAmelCase : Union[str, Any] =text_inputs.input_ids
        # Warn and truncate when the prompt exceeds CLIP's maximum sequence length.
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            _UpperCAmelCase : str =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
            _UpperCAmelCase : Union[str, Any] =text_input_ids[:, : self.tokenizer.model_max_length]
        _UpperCAmelCase : Optional[int] =self.text_encoder(text_input_ids.to(self.device))[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        _UpperCAmelCase : List[str] =prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=snake_case)
        # duplicate text embeddings for each generation per prompt
        _UpperCAmelCase : Optional[Any] =prompt_embeds.repeat_interleave(snake_case , dim=0)
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                # Use the learned unconditional embeddings.
                _UpperCAmelCase : Dict =self.learned_classifier_free_sampling_embeddings.embeddings
                _UpperCAmelCase : Any =negative_prompt_embeds.unsqueeze(0).repeat(snake_case , 1 , 1)
            else:
                # Encode an empty prompt as the unconditional embeddings.
                _UpperCAmelCase : str =[''] * batch_size
                _UpperCAmelCase : Dict =text_input_ids.shape[-1]
                _UpperCAmelCase : str =self.tokenizer(
                    snake_case , padding='max_length' , max_length=snake_case , truncation=snake_case , return_tensors='pt' , )
                _UpperCAmelCase : str =self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                _UpperCAmelCase : Tuple =negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=snake_case)
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            _UpperCAmelCase : int =negative_prompt_embeds.shape[1]
            _UpperCAmelCase : List[str] =negative_prompt_embeds.repeat(1 , snake_case , 1)
            _UpperCAmelCase : Optional[int] =negative_prompt_embeds.view(batch_size * num_images_per_prompt , snake_case , -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            _UpperCAmelCase : str =torch.cat([negative_prompt_embeds, prompt_embeds])
        return prompt_embeds

    @torch.no_grad()
    def __call__( self , snake_case , snake_case = 1_0_0 , snake_case = 5.0 , snake_case = 1.0 , snake_case = 1 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
        """Run the full VQ-Diffusion denoising loop and decode latents to images."""
        if isinstance(snake_case , snake_case):
            _UpperCAmelCase : Tuple =1
        elif isinstance(snake_case , snake_case):
            _UpperCAmelCase : int =len(snake_case)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(snake_case)}")
        _UpperCAmelCase : Optional[Any] =batch_size * num_images_per_prompt
        # A guidance scale above 1 enables classifier-free guidance.
        _UpperCAmelCase : Union[str, Any] =guidance_scale > 1.0
        _UpperCAmelCase : Any =self._encode_prompt(snake_case , snake_case , snake_case)
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(snake_case , snake_case) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(snake_case)}.")
        # get the initial completely masked latents unless the user supplied it
        _UpperCAmelCase : List[Any] =(batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            # The "mask" token index (num_vector_embeds - 1) fills the initial canvas.
            _UpperCAmelCase : Optional[Any] =self.transformer.num_vector_embeds - 1
            _UpperCAmelCase : Optional[int] =torch.full(snake_case , snake_case).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    'Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'
                    f" {self.transformer.num_vector_embeds - 1} (inclusive).")
            _UpperCAmelCase : Optional[Any] =latents.to(self.device)
        # set timesteps
        self.scheduler.set_timesteps(snake_case , device=self.device)
        _UpperCAmelCase : int =self.scheduler.timesteps.to(self.device)
        _UpperCAmelCase : Dict =latents
        for i, t in enumerate(self.progress_bar(snake_case)):
            # expand the sample if we are doing classifier free guidance
            _UpperCAmelCase : Union[str, Any] =torch.cat([sample] * 2) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            _UpperCAmelCase : Optional[Any] =self.transformer(snake_case , encoder_hidden_states=snake_case , timestep=snake_case).sample
            if do_classifier_free_guidance:
                _UpperCAmelCase , _UpperCAmelCase : Dict =model_output.chunk(2)
                _UpperCAmelCase : Dict =model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                # Renormalise the guided log-probabilities so they sum to 1 in prob space.
                model_output -= torch.logsumexp(snake_case , dim=1 , keepdim=snake_case)
            _UpperCAmelCase : Any =self.truncate(snake_case , snake_case)
            # remove `log(0)`'s (`-inf`s)
            _UpperCAmelCase : int =model_output.clamp(-7_0)
            # compute the previous noisy sample x_t -> x_t-1
            _UpperCAmelCase : List[Any] =self.scheduler.step(snake_case , timestep=snake_case , sample=snake_case , generator=snake_case).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(snake_case , snake_case , snake_case)
        # Look up codebook entries for the final latent indices and decode with the VQ-VAE.
        _UpperCAmelCase : List[str] =self.vqvae.config.vq_embed_dim
        _UpperCAmelCase : Optional[int] =(batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        _UpperCAmelCase : int =self.vqvae.quantize.get_codebook_entry(snake_case , shape=snake_case)
        _UpperCAmelCase : str =self.vqvae.decode(snake_case , force_not_quantize=snake_case).sample
        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL output.
        _UpperCAmelCase : str =(image / 2 + 0.5).clamp(0 , 1)
        _UpperCAmelCase : Tuple =image.cpu().permute(0 , 2 , 3 , 1).numpy()
        if output_type == "pil":
            _UpperCAmelCase : Optional[int] =self.numpy_to_pil(snake_case)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=snake_case)

    def lowerCAmelCase ( self , snake_case , snake_case) -> torch.FloatTensor:
        """Truncate (zero out in log space) tokens outside the top cumulative
        probability mass given by the truncation rate."""
        _UpperCAmelCase , _UpperCAmelCase : Dict =torch.sort(snake_case , 1 , descending=snake_case)
        _UpperCAmelCase : Dict =torch.exp(snake_case)
        _UpperCAmelCase : str =sorted_p_x_0.cumsum(dim=1) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        _UpperCAmelCase : Optional[int] =torch.full_like(keep_mask[:, 0:1, :] , snake_case)
        _UpperCAmelCase : Any =torch.cat((all_true, keep_mask) , dim=1)
        _UpperCAmelCase : Dict =keep_mask[:, :-1, :]
        # Undo the sort so the mask lines up with the original token ordering.
        _UpperCAmelCase : Any =keep_mask.gather(1 , indices.argsort(1))
        _UpperCAmelCase : str =log_p_x_0.clone()
        _UpperCAmelCase : Any =-torch.inf # -inf = log(0)
        return rv
| 242 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __magic_name__ :
    """Builds tiny ViTMSN configs and random inputs for the unit tests below.

    NOTE(review): the surrounding test class instantiates this helper as
    ``ViTMSNModelTester``; the obfuscated class name here does not match that
    reference — confirm the intended name when deobfuscating.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        # Fixed: the obfuscated original gave every parameter the same name
        # (a SyntaxError) and bound all values to a throwaway local instead of
        # instance attributes; restored to the names the methods below read.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return ``(config, pixel_values, labels)`` with random pixel values."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a small ViTMSNConfig from the tester's hyper-parameters."""
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward a base model and check the hidden-state shape."""
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Forward the classification head (RGB and greyscale) and check logits shape."""
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        # Fixed: these debug prints were missing the ``f`` prefix and printed
        # the literal brace expressions.
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` in the shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
    """Standard model tests for ViTMSN.

    NOTE(review): the base classes ``_UpperCamelCase`` and the
    ``ViTMSNModelTester`` reference in ``setUp`` are obfuscation residue that
    does not resolve in this file (upstream they are ``ModelTesterMixin``,
    ``PipelineTesterMixin`` and the tester class above) — confirm when
    deobfuscating. Method names are restored so ``unittest`` can discover
    them; in the obfuscated original every method was named ``__lowercase``
    and all but the last were shadowed.
    """

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def __lowerCamelCase ( ):
    """Load the COCO cats fixture image used by the slow integration test.

    Fixed: the obfuscated original bound the opened image to a throwaway local
    and then returned the undefined name ``image``.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
    """Slow integration test verifying real checkpoint outputs for ViTMSN."""

    @cached_property
    def default_image_processor(self):
        # The processor is only constructible when vision dependencies exist.
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor
        # NOTE(review): ``prepare_img`` is the fixture loader defined earlier in
        # this file under the obfuscated name ``__lowerCamelCase`` — the names
        # do not match as written; confirm when deobfuscating.
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 89 |
'''simple docstring'''
from typing import Any
class lowercase__ :
    """A single node of a singly linked list."""

    def __init__( self , data ):
        # Fixed: the obfuscated original assigned the (undefined) name ``data``
        # to throwaway locals, so ``self.data`` and ``self.next`` were never set.
        self.data = data  # payload stored in this node
        self.next = None  # reference to the next node; ``None`` marks the tail

    def __repr__( self ):
        return f"Node({self.data})"
class lowercase__ :
    """Singly linked list supporting indexing, insertion, deletion and reversal.

    NOTE(review): methods below construct ``Node(...)`` — in this file the node
    class is also (obfuscated as) ``lowercase__`` and is shadowed by this class,
    so the name ``Node`` does not resolve as written; confirm the intended
    class names when deobfuscating.
    """

    def __init__( self ):
        # Head of the list; ``None`` while the list is empty.
        self.head = None

    def __iter__( self ):
        """Yield the data stored in each node, head to tail."""
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__( self ):
        """Number of nodes in the list."""
        return sum(1 for _ in self)

    def __repr__( self ):
        """String of all values joined with ``->`` (empty string for an empty list)."""
        return "->".join([str(item) for item in self])

    def __getitem__( self , index ):
        """Return the data at position ``index``; raise ValueError when out of range."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__( self , index , data ):
        """Overwrite the data at position ``index``; raise ValueError when out of range."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail( self , data ):
        """Append ``data`` at the end of the list."""
        self.insert_nth(len(self), data)

    def insert_head( self , data ):
        """Prepend ``data`` at the front of the list."""
        self.insert_nth(0, data)

    def insert_nth( self , index , data ):
        """Insert ``data`` before position ``index`` (0..len inclusive)."""
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list( self ):  # print every node data
        print(self)

    def delete_head( self ):
        """Remove and return the first element."""
        return self.delete_nth(0)

    def delete_tail( self ):  # delete from tail
        """Remove and return the last element."""
        return self.delete_nth(len(self) - 1)

    def delete_nth( self , index = 0 ):
        """Remove and return the element at ``index``; raise IndexError when invalid."""
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty( self ):
        """True when the list holds no nodes."""
        return self.head is None

    def reverse( self ):
        """Reverse the list in place."""
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def snake_case_ ( ):
    """Self-test covering insertion, deletion, indexing and reversal.

    Fixed: the obfuscated original referenced the undefined names
    ``SCREAMING_SNAKE_CASE__`` and ``LinkedList``; the list class defined
    above in this file is (obfuscated as) ``lowercase__``.
    """
    linked_list = lowercase__()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def snake_case_ ( ):
    """Exercise the list with heterogeneous data (ints, strings, nodes, None, floats).

    Fixed: the obfuscated original referenced the undefined names
    ``SCREAMING_SNAKE_CASE__`` and ``LinkedList``.
    NOTE(review): ``Node`` is the node class defined (as ``lowercase__``)
    earlier in this file — the name does not resolve as written; confirm when
    deobfuscating.
    """
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = lowercase__()
    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def snake_case_ ( ):
    """Interactive driver: run the module's doctests, then build and mutate a
    linked list from user input.

    Fixed: the obfuscated original referenced the undefined names
    ``SCREAMING_SNAKE_CASE__`` and ``LinkedList``.
    """
    from doctest import testmod

    testmod()
    linked_list = lowercase__()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    # Fixed: the guard called the nonexistent ``main``; the driver above is
    # (obfuscated as) ``snake_case_``.
    snake_case_()
| 200 | 0 |
"""simple docstring"""
def UpperCAmelCase__ (nums : list[int] ) -> float:
    """Return the mean absolute deviation of ``nums``.

    Fixed: the parameter was obfuscated to ``snake_case__`` while the body read
    the undefined name ``nums``; the parameter is restored to the name the
    body uses.

    Raises:
        ValueError: if ``nums`` is empty.
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
# Run this module's doctests when the file is executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 357 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : int = 3 , ):
"""simple docstring"""
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(snake_case__ : int , snake_case__ : int , snake_case__ : int ) -> int:
return (pow(snake_case__ , 2 ) + step) % modulus
for _ in range(snake_case__ ):
# These track the position within the cycle detection logic.
_snake_case : Optional[int] = seed
_snake_case : str = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
_snake_case : Any = rand_fn(snake_case__ , snake_case__ , snake_case__ )
_snake_case : Optional[Any] = rand_fn(snake_case__ , snake_case__ , snake_case__ )
_snake_case : int = rand_fn(snake_case__ , snake_case__ , snake_case__ )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
_snake_case : str = gcd(hare - tortoise , snake_case__ )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
_snake_case : Union[str, Any] = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
    import argparse

    # Fixed: the obfuscated original bound parser/args/divisor all to ``A_``
    # (so ``parser``/``args`` were undefined) and called the nonexistent
    # ``pollard_rho``; the factoring routine in this file is (obfuscated as)
    # ``UpperCAmelCase__``.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = UpperCAmelCase__(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.