| code (string, lengths 87-55.2k) | code_codestyle (int64, 0-349) | style_context (string, lengths 135-49.1k) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
'''simple docstring'''


def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Recursive modular exponentiation: computes (base ** exponent) % modulo_value."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Project Euler 188: return the last `digits` digits of the hyperexponentiation of base by height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
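
# Quick sanity check (illustrative addition, not part of the original snippet):
# the recursion above must agree with Python's built-in three-argument pow().
assert _modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)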
| 265 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:  # was `if "fc2" and ...`: the bare string is always truthy
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:  # same always-truthy bug fixed here
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
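
# Illustrative example (hypothetical key, not taken from a real checkpoint):
# rename_fairseq_keys({"layers.3.moe_layer.experts.0.fc1.weight": w}, expert_idx=3)
# returns {"layers.3.ffn.experts.expert_3.fc1.weight": w}.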
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)

    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
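
# The returned index follows the standard sharded-checkpoint layout (key and
# file names below are illustrative):
# {"metadata": {"total_size": ...},
#  "weight_map": {"encoder.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00129.bin", ...}}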
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
| 265 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a : Optional[Any] = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_fnet"""] = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_fnet_fast"""] = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_fnet"""] = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
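    # With the structure above, submodules (and their heavy torch imports) are
    # only loaded on first attribute access, e.g. `from transformers import FNetModel`,
    # keeping a bare `import transformers` cheap.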
| 265 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
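
# For example (hypothetical checkpoint key): "network.0.1.dwconv.weight" is
# renamed above to "swiftformer.encoder.network.0.blocks.1.depth_wise_conv.weight".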
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 265 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
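
# Sanity-check sketch (illustrative, not executed at import time): the
# expansion ||a_i||^2 - 2 a_i.b_j + ||b_j||^2 above equals the direct
# pairwise computation:
#   rng = np.random.default_rng(0)
#   a, b = rng.random((4, 3)), rng.random((5, 3))
#   ref = ((a[:, None, :] - b[None, :, :]) ** 2).sum(-1)
#   assert np.allclose(squared_euclidean_distance(a, b), ref)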


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
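
# color_quantize maps each RGB pixel to its nearest cluster index: e.g. with
# clusters = [[0, 0, 0], [255, 255, 255]] (illustrative values), a mid-gray
# pixel [100, 100, 100] is assigned index 0.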


class ImageGPTImageProcessor(BaseImageProcessor):
    """Image processor for ImageGPT: resize, normalize to [-1, 1], and color-quantize pixels to cluster ids."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters=None,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_normalize=True,
        do_color_quantize=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image,
        size,
        resample=PILImageResampling.BILINEAR,
        data_format=None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image,
        data_format=None,
    ) -> np.ndarray:
        # Rescale from [0, 255] to [0, 2], then shift to [-1, 1].
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_normalize=None,
        do_color_quantize=None,
        clusters=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 265 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)  # was `nn.BatchNormad`, which does not exist
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
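
# Minimal usage sketch (illustrative, mirroring the tests above):
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def training_loop(batch_size):
#         ...  # raises a CUDA OOM RuntimeError if batch_size is still too large
#
#     training_loop()  # retries at 128, 64, 32, ... until the body succeeds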
| 265 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_jukebox"""] = [
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 265 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_git"""] = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 265 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper(self):
UpperCAmelCase : Dict = None
UpperCAmelCase : Dict = 20
UpperCAmelCase : Optional[Any] = self._get_uniform_logits(batch_size=2 , length=A )
# tweak scores to not be uniform anymore
UpperCAmelCase : Dict = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
UpperCAmelCase : Union[str, Any] = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
UpperCAmelCase : Any = jax.nn.softmax(A , axis=-1 )
UpperCAmelCase : int = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase : Optional[int] = FlaxTemperatureLogitsWarper(temperature=1.3 )
UpperCAmelCase : Union[str, Any] = jax.nn.softmax(temp_dist_warper_sharper(A , scores.copy() , cur_len=A ) , axis=-1 )
UpperCAmelCase : int = jax.nn.softmax(temp_dist_warper_smoother(A , scores.copy() , cur_len=A ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
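        # Temperature warping divides logits by T before the softmax: T < 1
        # sharpens the distribution (higher peak, lower valley) and T > 1
        # flattens it, which is what the assertions above verify.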
    def test_top_k_dist_warper(self):
UpperCAmelCase : Optional[int] = None
UpperCAmelCase : int = 10
UpperCAmelCase : Optional[int] = 2
# create ramp distribution
UpperCAmelCase : Tuple = np.broadcast_to(np.arange(A )[None, :] , (batch_size, vocab_size) ).copy()
UpperCAmelCase : List[Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
UpperCAmelCase : List[Any] = FlaxTopKLogitsWarper(3 )
UpperCAmelCase : int = top_k_warp(A , A , cur_len=A )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
UpperCAmelCase : str = 5
UpperCAmelCase : str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
UpperCAmelCase : Dict = np.broadcast_to(np.arange(A )[None, :] , (batch_size, length) ).copy()
UpperCAmelCase : Tuple = top_k_warp_safety_check(A , A , cur_len=A )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
    def test_top_p_dist_warper(self):
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : Tuple = 10
UpperCAmelCase : Dict = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
UpperCAmelCase : Dict = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
UpperCAmelCase : Union[str, Any] = FlaxTopPLogitsWarper(0.8 )
UpperCAmelCase : Optional[Any] = np.exp(top_p_warp(A , A , cur_len=A ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
UpperCAmelCase : Union[str, Any] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
# check edge cases with negative and extreme logits
UpperCAmelCase : List[str] = np.broadcast_to(np.arange(A )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
UpperCAmelCase : Dict = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
UpperCAmelCase : str = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
UpperCAmelCase : int = top_p_warp(A , A , cur_len=A )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
    def test_min_length_dist_processor(self):
UpperCAmelCase : List[str] = 20
UpperCAmelCase : str = 4
UpperCAmelCase : Any = 0
UpperCAmelCase : List[str] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=A )
# check that min length is applied at length 5
UpperCAmelCase : Any = ids_tensor((batch_size, 20) , vocab_size=20 )
UpperCAmelCase : List[str] = 5
UpperCAmelCase : Dict = self._get_uniform_logits(A , A )
UpperCAmelCase : Dict = min_dist_processor(A , A , cur_len=A )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
UpperCAmelCase : Optional[Any] = self._get_uniform_logits(A , A )
UpperCAmelCase : List[Any] = 15
UpperCAmelCase : int = min_dist_processor(A , A , cur_len=A )
self.assertFalse(jnp.isinf(A ).any() )
    def test_forced_bos_token_logits_processor(self):
UpperCAmelCase : Union[str, Any] = 20
UpperCAmelCase : Tuple = 4
UpperCAmelCase : Optional[Any] = 0
UpperCAmelCase : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A )
# check that all scores are -inf except the bos_token_id score
UpperCAmelCase : Dict = ids_tensor((batch_size, 1) , vocab_size=20 )
UpperCAmelCase : Union[str, Any] = 1
UpperCAmelCase : Union[str, Any] = self._get_uniform_logits(A , A )
UpperCAmelCase : Union[str, Any] = logits_processor(A , A , cur_len=A )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
UpperCAmelCase : List[Any] = 3
UpperCAmelCase : List[Any] = self._get_uniform_logits(A , A )
UpperCAmelCase : Optional[Any] = logits_processor(A , A , cur_len=A )
self.assertFalse(jnp.isinf(A ).any() )
    def test_forced_eos_token_logits_processor(self):
UpperCAmelCase : Optional[Any] = 20
UpperCAmelCase : int = 4
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : str = 5
UpperCAmelCase : List[str] = FlaxForcedEOSTokenLogitsProcessor(max_length=A , eos_token_id=A )
# check that all scores are -inf except the eos_token_id when max_length is reached
UpperCAmelCase : str = ids_tensor((batch_size, 4) , vocab_size=20 )
UpperCAmelCase : str = 4
UpperCAmelCase : Optional[Any] = self._get_uniform_logits(A , A )
UpperCAmelCase : Dict = logits_processor(A , A , cur_len=A )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
UpperCAmelCase : Tuple = 3
UpperCAmelCase : Union[str, Any] = self._get_uniform_logits(A , A )
UpperCAmelCase : str = logits_processor(A , A , cur_len=A )
self.assertFalse(jnp.isinf(A ).any() )
    def test_processor_list(self):
UpperCAmelCase : List[str] = 4
UpperCAmelCase : Optional[int] = 10
UpperCAmelCase : Union[str, Any] = 15
UpperCAmelCase : Union[str, Any] = 2
UpperCAmelCase : Optional[Any] = 1
UpperCAmelCase : Any = 15
# dummy input_ids and scores
UpperCAmelCase : Optional[int] = ids_tensor((batch_size, sequence_length) , A )
UpperCAmelCase : Tuple = input_ids.copy()
UpperCAmelCase : Union[str, Any] = self._get_uniform_logits(A , A )
UpperCAmelCase : Tuple = scores.copy()
# instantiate all dist processors
UpperCAmelCase : Any = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase : Union[str, Any] = FlaxTopKLogitsWarper(3 )
UpperCAmelCase : Optional[Any] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
UpperCAmelCase : List[str] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=A )
UpperCAmelCase : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A )
UpperCAmelCase : List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=A , eos_token_id=A )
UpperCAmelCase : Tuple = 10
# no processor list
UpperCAmelCase : Dict = temp_dist_warp(A , A , cur_len=A )
UpperCAmelCase : int = top_k_warp(A , A , cur_len=A )
UpperCAmelCase : Union[str, Any] = top_p_warp(A , A , cur_len=A )
UpperCAmelCase : Optional[Any] = min_dist_proc(A , A , cur_len=A )
UpperCAmelCase : Optional[int] = bos_dist_proc(A , A , cur_len=A )
UpperCAmelCase : Union[str, Any] = eos_dist_proc(A , A , cur_len=A )
# with processor list
UpperCAmelCase : Dict = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
UpperCAmelCase : Tuple = processor(A , A , cur_len=A )
# scores should be equal
self.assertTrue(jnp.allclose(A , A , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
    def test_processor_list_jitted(self):
UpperCAmelCase : Tuple = 4
UpperCAmelCase : Any = 10
UpperCAmelCase : int = 15
UpperCAmelCase : Union[str, Any] = 2
UpperCAmelCase : Optional[int] = 1
UpperCAmelCase : Union[str, Any] = 15
# dummy input_ids and scores
UpperCAmelCase : Dict = ids_tensor((batch_size, sequence_length) , A )
UpperCAmelCase : Tuple = input_ids.copy()
UpperCAmelCase : Tuple = self._get_uniform_logits(A , A )
UpperCAmelCase : str = scores.copy()
# instantiate all dist processors
UpperCAmelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase : Tuple = FlaxTopKLogitsWarper(3 )
UpperCAmelCase : List[str] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
UpperCAmelCase : Optional[int] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=A )
UpperCAmelCase : Tuple = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A )
UpperCAmelCase : Optional[int] = FlaxForcedEOSTokenLogitsProcessor(max_length=A , eos_token_id=A )
UpperCAmelCase : Optional[Any] = 10
# no processor list
def run_no_processor_list(A , A , A ):
UpperCAmelCase : Union[str, Any] = temp_dist_warp(A , A , cur_len=A )
UpperCAmelCase : str = top_k_warp(A , A , cur_len=A )
UpperCAmelCase : int = top_p_warp(A , A , cur_len=A )
UpperCAmelCase : Dict = min_dist_proc(A , A , cur_len=A )
UpperCAmelCase : Optional[Any] = bos_dist_proc(A , A , cur_len=A )
UpperCAmelCase : Tuple = eos_dist_proc(A , A , cur_len=A )
return scores
# with processor list
def run_processor_list(A , A , A ):
UpperCAmelCase : Optional[int] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
UpperCAmelCase : List[str] = processor(A , A , cur_len=A )
return scores
UpperCAmelCase : str = jax.jit(A )
UpperCAmelCase : Dict = jax.jit(A )
UpperCAmelCase : List[Any] = jitted_run_no_processor_list(A , A , A )
UpperCAmelCase : int = jitted_run_processor_list(A , A , A )
# scores should be equal
self.assertTrue(jnp.allclose(A , A , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 265 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase : List[str] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
UpperCAmelCase : int = dict(zip(A , range(len(A ) ) ) )
UpperCAmelCase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(A ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(A ) )
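        # The toy vocab/merges written above define a minimal GPT-2-style
        # byte-level BPE: "\u0120" marks a leading space, so "lower" tokenizes
        # to ["l", "o", "w", "er"], as the tests below expect.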
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
UpperCAmelCase : Optional[Any] = """lower newer"""
UpperCAmelCase : Optional[int] = """lower newer"""
return input_text, output_text
    def test_full_tokenizer(self):
UpperCAmelCase : Tuple = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase : Dict = """lower newer"""
UpperCAmelCase : int = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
UpperCAmelCase : Tuple = tokenizer.tokenize(A ) # , add_prefix_space=True)
self.assertListEqual(A , A )
UpperCAmelCase : Any = tokens + [tokenizer.unk_token]
UpperCAmelCase : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
    def test_encode_integration(self):
UpperCAmelCase : str = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=A ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=A ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
    def test_sequence_builders(self):
UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
UpperCAmelCase : List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=A )
UpperCAmelCase : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A )
UpperCAmelCase : List[str] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=A , add_prefix_space=A )
UpperCAmelCase : List[str] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=A , add_prefix_space=A )
UpperCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A )
UpperCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(A , A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
UpperCAmelCase : str = self.get_tokenizer()
UpperCAmelCase : List[Any] = """Encode this sequence."""
UpperCAmelCase : List[str] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
UpperCAmelCase : Union[str, Any] = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A )
UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A , A )
UpperCAmelCase : Tuple = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A )
UpperCAmelCase : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A , A )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
UpperCAmelCase : int = tokenizer.encode(A , add_special_tokens=A )
UpperCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A , A )
# Testing spaces after special tokens
UpperCAmelCase : Union[str, Any] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(A , lstrip=A , rstrip=A )} ) # mask token has a left space
UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A )
UpperCAmelCase : Union[str, Any] = """Encode <mask> sequence"""
UpperCAmelCase : Union[str, Any] = """Encode <mask>sequence"""
UpperCAmelCase : Union[str, Any] = tokenizer.encode(A )
UpperCAmelCase : Union[str, Any] = encoded.index(A )
UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A , A )
UpperCAmelCase : Tuple = tokenizer.encode(A )
UpperCAmelCase : Optional[int] = encoded.index(A )
UpperCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A , A )
    def test_pretokenized_inputs(self):
pass
    def test_embeded_special_tokens(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : int = self.tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : Dict = """A, <mask> AllenNLP sentence."""
UpperCAmelCase : Dict = tokenizer_r.encode_plus(A , add_special_tokens=A , return_token_type_ids=A )
UpperCAmelCase : Tuple = tokenizer_p.encode_plus(A , add_special_tokens=A , return_token_type_ids=A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
UpperCAmelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
UpperCAmelCase : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , A )
self.assertEqual(post_processor_state["""add_prefix_space"""] , A )
self.assertEqual(post_processor_state["""trim_offsets"""] , A )
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCAmelCase : int = f'''{text_of_1_token} {text_of_1_token}'''
UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , )
UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : Dict = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , )
UpperCAmelCase : int = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : List[Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , )
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , )
UpperCAmelCase : Optional[Any] = f''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )) , )
UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : Union[str, Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : Optional[int] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
| 265 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS: every edge weight must be 0 or 1."""

    def __init__(self, size):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex, to_vertex, weight):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex, finish_vertex) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges go to the front of the deque, 1-weight edges to
                # the back, keeping it ordered by distance without a priority queue.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
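
# Quick usage sketch (illustrative): with a deque instead of a priority queue,
# 0-1 BFS finds shortest paths in O(V + E).
#
#     g = AdjacencyList(3)
#     g.add_edge(0, 1, 0)
#     g.add_edge(1, 2, 1)
#     assert g.get_shortest_path(0, 2) == 1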
| 265 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
UpperCAmelCase : Union[str, Any] = ZeroShotClassificationPipeline(
model=A , tokenizer=A , candidate_labels=["""polics""", """health"""] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
UpperCAmelCase : Dict = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" )
self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} )
# No kwarg
UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , ["""politics"""] )
self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} )
UpperCAmelCase : str = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] )
self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} )
UpperCAmelCase : List[Any] = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" )
self.assertEqual(
A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] )
self.assertEqual(
A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
UpperCAmelCase : Dict = classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" )
self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} )
# https://github.com/huggingface/transformers/issues/13846
UpperCAmelCase : str = classifier(["""I am happy"""] , ["""positive""", """negative"""] )
self.assertEqual(
A , [
{"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]}
for i in range(1 )
] , )
UpperCAmelCase : List[str] = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] )
self.assertEqual(
A , [
{"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]}
for i in range(2 )
] , )
with self.assertRaises(A ):
classifier("""""" , candidate_labels="""politics""" )
with self.assertRaises(A ):
classifier(A , candidate_labels="""politics""" )
with self.assertRaises(A ):
classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" )
with self.assertRaises(A ):
classifier("""Who are you voting for in 2020?""" , candidate_labels=A )
with self.assertRaises(A ):
classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , )
with self.assertRaises(A ):
classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=A , )
self.run_entailment_id(A )
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
UpperCAmelCase : Tuple = zero_shot_classifier.model.config
UpperCAmelCase : Union[str, Any] = config.labelaid
UpperCAmelCase : Tuple = zero_shot_classifier.entailment_id
UpperCAmelCase : Any = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
UpperCAmelCase : Optional[Any] = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
UpperCAmelCase : Any = {"""ENTAIL""": 0, """NON-ENTAIL""": 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
UpperCAmelCase : List[str] = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
UpperCAmelCase : Tuple = original_labelaid
self.assertEqual(A , zero_shot_classifier.entailment_id )
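        # Note: entailment_id looks up config.label2id for a label starting with
        # "entail" (case-insensitive) and falls back to -1, i.e. the last logit,
        # when no entailment-style label is found; both cases are exercised above.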
@require_torch
    def test_truncation(self):
UpperCAmelCase : int = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"""Who are you voting for in 2020?""" * 100 , candidate_labels=["""politics""", """public health""", """science"""] )
@require_torch
    def test_small_model_pt(self):
UpperCAmelCase : Optional[int] = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
UpperCAmelCase : Union[str, Any] = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
    def test_small_model_tf(self):
UpperCAmelCase : Optional[Any] = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , )
UpperCAmelCase : List[Any] = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
    def test_large_model_pt(self):
UpperCAmelCase : Optional[int] = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" )
UpperCAmelCase : Optional[int] = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
UpperCAmelCase : str = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def _lowercase( self ) -> List[str]:
UpperCAmelCase : int = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" )
UpperCAmelCase : Tuple = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
UpperCAmelCase : Any = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
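One detail worth spelling out from the slow tests above: with multi_label=True each candidate label is scored independently against the entailment/contradiction logits, which is why the reported scores (0.817, 0.713, ...) need not sum to 1, whereas the default single-label mode softmaxes all candidates jointly. A hedged sketch (the input sentence is made up; the checkpoint is the roberta-large-mnli used above):

from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
result = classifier(
    "The Transformer dispenses with recurrence and convolutions entirely.",
    candidate_labels=["machine learning", "cooking"],
    multi_label=True,
)
# result: {"sequence": ..., "labels": [...], "scores": [...]} with scores
# sorted descending; per-label probabilities are independent here, not a simplex.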
| 265 | 1 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
a : Optional[int] = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
def __lowerCamelCase ( _lowercase ) -> Tuple:
UpperCAmelCase : str = {}
state_dict.pop("""pixel_mean""" , _lowercase )
state_dict.pop("""pixel_std""" , _lowercase )
UpperCAmelCase : Tuple = R""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"""
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
UpperCAmelCase : Union[str, Any] = key.replace(_lowercase , _lowercase )
if re.match(_lowercase , _lowercase ):
UpperCAmelCase : Dict = int(re.match(_lowercase , _lowercase ).group(2 ) )
if layer_nb == 0:
UpperCAmelCase : Tuple = key.replace("""layers.0""" , """proj_in""" )
elif layer_nb == 1:
UpperCAmelCase : Union[str, Any] = key.replace("""layers.1""" , """layers.0""" )
elif layer_nb == 2:
UpperCAmelCase : Tuple = key.replace("""layers.2""" , """proj_out""" )
UpperCAmelCase : List[str] = value
UpperCAmelCase : List[str] = model_state_dict[
"""prompt_encoder.shared_embedding.positional_embedding"""
]
return model_state_dict
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase="ybelkada/segment-anything" ) -> Optional[Any]:
UpperCAmelCase : Dict = hf_hub_download(_lowercase , F'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
UpperCAmelCase : int = SamConfig()
elif "sam_vit_l" in model_name:
UpperCAmelCase : str = SamVisionConfig(
hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , global_attn_indexes=[5, 1_1, 1_7, 2_3] , )
UpperCAmelCase : Dict = SamConfig(
vision_config=_lowercase , )
elif "sam_vit_h" in model_name:
UpperCAmelCase : str = SamVisionConfig(
hidden_size=1_2_8_0 , num_hidden_layers=3_2 , num_attention_heads=1_6 , global_attn_indexes=[7, 1_5, 2_3, 3_1] , )
UpperCAmelCase : Optional[int] = SamConfig(
vision_config=_lowercase , )
UpperCAmelCase : Union[str, Any] = torch.load(_lowercase , map_location="""cpu""" )
UpperCAmelCase : Union[str, Any] = replace_keys(_lowercase )
UpperCAmelCase : Union[str, Any] = SamImageProcessor()
UpperCAmelCase : Union[str, Any] = SamProcessor(image_processor=_lowercase )
UpperCAmelCase : Optional[int] = SamModel(_lowercase )
hf_model.load_state_dict(_lowercase )
UpperCAmelCase : Dict = hf_model.to("""cuda""" )
UpperCAmelCase : Optional[Any] = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"""
UpperCAmelCase : List[Any] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ).convert("""RGB""" )
UpperCAmelCase : List[Any] = [[[4_0_0, 6_5_0]]]
UpperCAmelCase : Any = [[1]]
UpperCAmelCase : Optional[Any] = processor(images=np.array(_lowercase ) , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
UpperCAmelCase : Dict = hf_model(**_lowercase )
UpperCAmelCase : str = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_8902_5115_9668
UpperCAmelCase : Any = processor(
images=np.array(_lowercase ) , input_points=_lowercase , input_labels=_lowercase , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
UpperCAmelCase : List[str] = hf_model(**_lowercase )
UpperCAmelCase : Optional[Any] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712_6030_9219_3604
UpperCAmelCase : Optional[int] = ((7_5, 2_7_5, 1_7_2_5, 8_5_0),)
UpperCAmelCase : Optional[int] = processor(images=np.array(_lowercase ) , input_boxes=_lowercase , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
UpperCAmelCase : int = hf_model(**_lowercase )
UpperCAmelCase : List[str] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686_0156_0592_6514
# Test with 2 points and 1 image.
UpperCAmelCase : Any = [[[4_0_0, 6_5_0], [8_0_0, 6_5_0]]]
UpperCAmelCase : Optional[int] = [[1, 1]]
UpperCAmelCase : List[str] = processor(
images=np.array(_lowercase ) , input_points=_lowercase , input_labels=_lowercase , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
UpperCAmelCase : Optional[int] = hf_model(**_lowercase )
UpperCAmelCase : Optional[Any] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
a : Optional[int] = argparse.ArgumentParser()
a : Any = ["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""]
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
a : Dict = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
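A plausible invocation of this converter; the script filename is hypothetical, the flags come from the argparse setup above, and a CUDA device is required because the sanity checks move the model and inputs to "cuda":

# python convert_sam_checkpoint.py \
#     --model_name sam_vit_h_4b8939 \
#     --pytorch_dump_folder_path ./sam-vit-huge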
| 265 |
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
a : List[Any] = """__DUMMY_TRANSFORMERS_USER__"""
a : Tuple = """Dummy User"""
a : Optional[Any] = """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"""
a : Optional[Any] = """https://hub-ci.huggingface.co"""
a : List[Any] = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
a : Tuple = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}"""
a : str = Path("""~/.huggingface/hub_ci_token""").expanduser()
@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> Optional[int]:
monkeypatch.setattr(
"""huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , _lowercase )
@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> List[str]:
monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , _lowercase )
monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , _lowercase )
@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> Any:
monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , _lowercase )
@pytest.fixture
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[Any]:
HfFolder.save_token(_lowercase )
yield
HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( ) -> str:
return HfApi(endpoint=_lowercase )
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Union[str, Any]:
UpperCAmelCase : str = HfFolder.get_token()
HfFolder.save_token(_lowercase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(_lowercase )
@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> Any:
def _cleanup_repo(_lowercase ):
hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" )
return _cleanup_repo
@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> List[str]:
@contextmanager
def _temporary_repo(_lowercase ):
try:
yield repo_id
finally:
cleanup_repo(_lowercase )
return _temporary_repo
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]:
UpperCAmelCase : str = F'''repo_txt_data-{int(time.time() * 10e3 )}'''
UpperCAmelCase : List[Any] = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase )
hf_api.upload_file(
token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data/text_data.txt""" , repo_id=_lowercase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]:
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]:
UpperCAmelCase : Optional[int] = F'''repo_zipped_txt_data-{int(time.time() * 10e3 )}'''
UpperCAmelCase : Optional[int] = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase )
hf_api.upload_file(
token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data.zip""" , repo_id=_lowercase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]:
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Tuple:
UpperCAmelCase : List[Any] = F'''repo_zipped_img_data-{int(time.time() * 10e3 )}'''
UpperCAmelCase : List[str] = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase )
hf_api.upload_file(
token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data.zip""" , repo_id=_lowercase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]:
return hf_private_dataset_repo_zipped_img_data_
| 265 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase_ ( __magic_name__ ):
lowercase = (PNDMScheduler,)
lowercase = (('num_inference_steps', 50),)
def _lowercase( self , **A ) -> Dict:
UpperCAmelCase : Union[str, Any] = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
}
config.update(**A )
return config
def _lowercase( self , A=0 , **A ) -> int:
UpperCAmelCase : List[str] = dict(self.forward_default_kwargs )
UpperCAmelCase : str = kwargs.pop("""num_inference_steps""" , A )
UpperCAmelCase : Tuple = self.dummy_sample
UpperCAmelCase : Optional[int] = 0.1 * sample
UpperCAmelCase : str = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase : Optional[int] = self.get_scheduler_config(**A )
UpperCAmelCase : Tuple = scheduler_class(**A )
scheduler.set_timesteps(A )
# copy over dummy past residuals
UpperCAmelCase : Optional[int] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A )
UpperCAmelCase : Optional[Any] = scheduler_class.from_pretrained(A )
new_scheduler.set_timesteps(A )
# copy over dummy past residuals
UpperCAmelCase : Optional[Any] = dummy_past_residuals[:]
UpperCAmelCase : List[Any] = scheduler.step_prk(A , A , A , **A ).prev_sample
UpperCAmelCase : str = new_scheduler.step_prk(A , A , A , **A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase : Optional[Any] = scheduler.step_plms(A , A , A , **A ).prev_sample
UpperCAmelCase : Optional[int] = new_scheduler.step_plms(A , A , A , **A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _lowercase( self ) -> Dict:
pass
def _lowercase( self , A=0 , **A ) -> Union[str, Any]:
UpperCAmelCase : Tuple = dict(self.forward_default_kwargs )
UpperCAmelCase : Optional[int] = kwargs.pop("""num_inference_steps""" , A )
UpperCAmelCase : List[Any] = self.dummy_sample
UpperCAmelCase : Dict = 0.1 * sample
UpperCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase : Any = self.get_scheduler_config()
UpperCAmelCase : str = scheduler_class(**A )
scheduler.set_timesteps(A )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A )
UpperCAmelCase : Tuple = scheduler_class.from_pretrained(A )
new_scheduler.set_timesteps(A )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase : Optional[int] = dummy_past_residuals[:]
UpperCAmelCase : Union[str, Any] = scheduler.step_prk(A , A , A , **A ).prev_sample
UpperCAmelCase : Union[str, Any] = new_scheduler.step_prk(A , A , A , **A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase : Union[str, Any] = scheduler.step_plms(A , A , A , **A ).prev_sample
UpperCAmelCase : Dict = new_scheduler.step_plms(A , A , A , **A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _lowercase( self , **A ) -> Dict:
UpperCAmelCase : int = self.scheduler_classes[0]
UpperCAmelCase : str = self.get_scheduler_config(**A )
UpperCAmelCase : int = scheduler_class(**A )
UpperCAmelCase : List[Any] = 10
UpperCAmelCase : List[str] = self.dummy_model()
UpperCAmelCase : str = self.dummy_sample_deter
scheduler.set_timesteps(A )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase : Optional[Any] = model(A , A )
UpperCAmelCase : Tuple = scheduler.step_prk(A , A , A ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase : Dict = model(A , A )
UpperCAmelCase : Optional[int] = scheduler.step_plms(A , A , A ).prev_sample
return sample
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Optional[int] = dict(self.forward_default_kwargs )
UpperCAmelCase : Optional[int] = kwargs.pop("""num_inference_steps""" , A )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase : List[str] = self.get_scheduler_config()
UpperCAmelCase : List[Any] = scheduler_class(**A )
UpperCAmelCase : Union[str, Any] = self.dummy_sample
UpperCAmelCase : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(A , """set_timesteps""" ):
scheduler.set_timesteps(A )
elif num_inference_steps is not None and not hasattr(A , """set_timesteps""" ):
UpperCAmelCase : Union[str, Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
UpperCAmelCase : Union[str, Any] = dummy_past_residuals[:]
UpperCAmelCase : Optional[Any] = scheduler.step_prk(A , 0 , A , **A ).prev_sample
UpperCAmelCase : Dict = scheduler.step_prk(A , 1 , A , **A ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCAmelCase : Dict = scheduler.step_plms(A , 0 , A , **A ).prev_sample
UpperCAmelCase : int = scheduler.step_plms(A , 1 , A , **A ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _lowercase( self ) -> Optional[Any]:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=A )
def _lowercase( self ) -> Tuple:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=A )
UpperCAmelCase : Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase : int = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase : Optional[int] = scheduler_class(**A )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def _lowercase( self ) -> Union[str, Any]:
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] , [0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=A , beta_end=A )
def _lowercase( self ) -> str:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A )
def _lowercase( self ) -> Any:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A )
def _lowercase( self ) -> Union[str, Any]:
for t in [1, 5, 10]:
self.check_over_forward(time_step=A )
def _lowercase( self ) -> Optional[int]:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=A )
def _lowercase( self ) -> List[str]:
# earlier versions of set_timesteps() raised an indexing error on alphas when the number of inference steps was a power of 3
UpperCAmelCase : Dict = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase : Optional[int] = self.dummy_sample
UpperCAmelCase : Tuple = 0.1 * sample
UpperCAmelCase : str = self.get_scheduler_config()
UpperCAmelCase : List[str] = scheduler_class(**A )
scheduler.set_timesteps(A )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase : List[Any] = scheduler.step_prk(A , A , A ).prev_sample
def _lowercase( self ) -> List[str]:
with self.assertRaises(A ):
UpperCAmelCase : List[Any] = self.scheduler_classes[0]
UpperCAmelCase : Optional[Any] = self.get_scheduler_config()
UpperCAmelCase : List[Any] = scheduler_class(**A )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def _lowercase( self ) -> str:
UpperCAmelCase : Dict = self.full_loop()
UpperCAmelCase : Optional[int] = torch.sum(torch.abs(A ) )
UpperCAmelCase : List[str] = torch.mean(torch.abs(A ) )
assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Optional[int] = self.full_loop(prediction_type="""v_prediction""" )
UpperCAmelCase : List[str] = torch.sum(torch.abs(A ) )
UpperCAmelCase : str = torch.mean(torch.abs(A ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3
def _lowercase( self ) -> int:
# We specify a different beta_start so that the first alpha is 0.99 (1 - 0.01)
UpperCAmelCase : str = self.full_loop(set_alpha_to_one=A , beta_start=0.0_1 )
UpperCAmelCase : Any = torch.sum(torch.abs(A ) )
UpperCAmelCase : Dict = torch.mean(torch.abs(A ) )
assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3
def _lowercase( self ) -> str:
# We specify a different beta_start so that the first alpha is 0.99 (1 - 0.01)
UpperCAmelCase : Union[str, Any] = self.full_loop(set_alpha_to_one=A , beta_start=0.0_1 )
UpperCAmelCase : int = torch.sum(torch.abs(A ) )
UpperCAmelCase : int = torch.mean(torch.abs(A ) )
assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
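A minimal stand-alone denoising loop mirroring the full_loop() helper above: the zero-output model is a stand-in for a real UNet's noise prediction, and the sketch assumes a diffusers version that exposes step_prk/step_plms, as these tests do.

import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(
    num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)

def model(sample, t):  # stand-in for a UNet noise prediction
    return torch.zeros_like(sample)

for t in scheduler.prk_timesteps:  # Runge-Kutta warm-up steps
    sample = scheduler.step_prk(model(sample, t), t, sample).prev_sample
for t in scheduler.plms_timesteps:  # linear multistep steps
    sample = scheduler.step_plms(model(sample, t), t, sample).prev_sample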
| 265 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(__magic_name__ )
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , **A ) -> List[str]:
super().__init__(**A )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , A , **A ) -> Optional[Any]:
return super().__call__(A , **A )
def _lowercase( self , **A ) -> Optional[Any]:
UpperCAmelCase : List[Any] = {}
if "candidate_labels" in kwargs:
UpperCAmelCase : Dict = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
UpperCAmelCase : Optional[Any] = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def _lowercase( self , A , A=None , A="This is a photo of {}." ) -> Optional[Any]:
UpperCAmelCase : int = load_image(A )
UpperCAmelCase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
UpperCAmelCase : List[str] = candidate_labels
UpperCAmelCase : Tuple = [hypothesis_template.format(A ) for x in candidate_labels]
UpperCAmelCase : Union[str, Any] = self.tokenizer(A , return_tensors=self.framework , padding=A )
UpperCAmelCase : Union[str, Any] = [text_inputs]
return inputs
def _lowercase( self , A ) -> Optional[int]:
UpperCAmelCase : List[Any] = model_inputs.pop("""candidate_labels""" )
UpperCAmelCase : Optional[Any] = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , A ):
UpperCAmelCase : Optional[Any] = text_inputs[0]
else:
# Batching case.
UpperCAmelCase : Any = text_inputs[0][0]
UpperCAmelCase : Dict = self.model(**A , **A )
UpperCAmelCase : List[Any] = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
def _lowercase( self , A ) -> Union[str, Any]:
UpperCAmelCase : int = model_outputs.pop("""candidate_labels""" )
UpperCAmelCase : int = model_outputs["""logits"""][0]
if self.framework == "pt":
UpperCAmelCase : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 )
UpperCAmelCase : Any = probs.tolist()
if not isinstance(A , A ):
UpperCAmelCase : Any = [scores]
elif self.framework == "tf":
UpperCAmelCase : List[str] = stable_softmax(A , axis=-1 )
UpperCAmelCase : Union[str, Any] = probs.numpy().tolist()
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
UpperCAmelCase : Any = [
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(A , A ) , key=lambda x : -x[0] )
]
return result
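A hedged usage sketch for the pipeline defined above; the CLIP checkpoint and the image URL are common examples, not requirements of this class.

from transformers import pipeline

classifier = pipeline(
    "zero-shot-image-classification", model="openai/clip-vit-base-patch32"
)
outputs = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["two cats", "a dog", "an airplane"],
    hypothesis_template="This is a photo of {}.",
)
# outputs: list of {"score": float, "label": str}, sorted best-first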
| 265 | 1 |
'''simple docstring'''
def __lowerCamelCase ( _lowercase ) -> int:
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
UpperCAmelCase : Tuple = 1
UpperCAmelCase : Dict = 1
while repunit:
UpperCAmelCase : List[str] = (1_0 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def __lowerCamelCase ( _lowercase = 1_0_0_0_0_0_0 ) -> int:
UpperCAmelCase : Union[str, Any] = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(_lowercase ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F'''{solution() = }''')
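For quick verification, a de-obfuscated sketch of the routine above; the name repunit_index is illustrative, not from the original module. The loop tracks R(n) mod divisor, so A(7) = 6 (the repunit 111111 = 7 * 15873) and A(11) = 2.

def repunit_index(divisor: int) -> int:
    # A(divisor): length of the smallest repunit 111...1 divisible by
    # `divisor`; returns 0 when gcd(divisor, 10) != 1 (no such repunit).
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit, index = 1, 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor  # R(n+1) = 10*R(n) + 1
        index += 1
    return index

assert repunit_index(7) == 6    # 111111 == 7 * 15873
assert repunit_index(11) == 2   # 11 is itself a repunit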
| 265 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __lowerCamelCase ( _lowercase ) -> Optional[Any]:
return getitem, k
def __lowerCamelCase ( _lowercase , _lowercase ) -> List[str]:
return setitem, k, v
def __lowerCamelCase ( _lowercase ) -> int:
return delitem, k
def __lowerCamelCase ( _lowercase , _lowercase , *_lowercase ) -> Optional[Any]:
try:
return fun(_lowercase , *_lowercase ), None
except Exception as e:
return None, e
a : List[str] = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
a : List[Any] = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
a : int = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
a : List[Any] = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
a : Tuple = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
a : Optional[Any] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def __lowerCamelCase ( _lowercase ) -> Optional[int]:
UpperCAmelCase : List[str] = HashMap(initial_block_size=4 )
UpperCAmelCase : Dict = {}
for _, (fun, *args) in enumerate(_lowercase ):
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = _run_operation(_lowercase , _lowercase , *_lowercase )
UpperCAmelCase , UpperCAmelCase : Any = _run_operation(_lowercase , _lowercase , *_lowercase )
assert my_res == py_res
assert str(_lowercase ) == str(_lowercase )
assert set(_lowercase ) == set(_lowercase )
assert len(_lowercase ) == len(_lowercase )
assert set(my.items() ) == set(py.items() )
def __lowerCamelCase ( ) -> List[Any]:
def is_public(_lowercase ) -> bool:
return not name.startswith("""_""" )
UpperCAmelCase : int = {name for name in dir({} ) if is_public(_lowercase )}
UpperCAmelCase : Any = {name for name in dir(HashMap() ) if is_public(_lowercase )}
assert dict_public_names > hash_public_names
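The differential-testing idea above, isolated to a single operation (using the helper names the parametrization refers to; this copy's definitions are obfuscated, so treat it as a sketch): run the same mutation against the custom HashMap and a reference dict, then compare both the results and the failure modes.

from operator import setitem

my, py = HashMap(initial_block_size=4), {}
my_res, my_exc = _run_operation(my, setitem, "key_a", "val_a")
py_res, py_exc = _run_operation(py, setitem, "key_a", "val_a")
assert my_res == py_res
assert type(my_exc) == type(py_exc)  # identical failure mode, if any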
| 265 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = CLIPTokenizer
lowercase = CLIPTokenizerFast
lowercase = True
lowercase = {}
lowercase = False
def _lowercase( self ) -> Optional[int]:
super().setUp()
# fmt: off
UpperCAmelCase : List[Any] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
UpperCAmelCase : Dict = dict(zip(A , range(len(A ) ) ) )
UpperCAmelCase : int = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
UpperCAmelCase : List[Any] = {"""unk_token""": """<unk>"""}
UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(A ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(A ) )
def _lowercase( self , **A ) -> str:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , **A ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> Optional[int]:
UpperCAmelCase : List[Any] = """lower newer"""
UpperCAmelCase : int = """lower newer"""
return input_text, output_text
def _lowercase( self ) -> Any:
UpperCAmelCase : Optional[Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase : Tuple = """lower newer"""
UpperCAmelCase : Tuple = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
UpperCAmelCase : Optional[Any] = tokenizer.tokenize(A )
self.assertListEqual(A , A )
UpperCAmelCase : Optional[Any] = tokens + [tokenizer.unk_token]
UpperCAmelCase : Dict = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
@require_ftfy
def _lowercase( self ) -> Dict:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : int = self.tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : int = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
UpperCAmelCase : Any = tokenizer_s.tokenize(A )
UpperCAmelCase : Optional[Any] = tokenizer_r.tokenize(A )
self.assertListEqual(A , A )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCAmelCase : int = """xa\u0303y""" + """ """ + """x\xe3y"""
UpperCAmelCase : Dict = tokenizer_s.tokenize(A )
UpperCAmelCase : str = tokenizer_r.tokenize(A )
self.assertListEqual(A , A )
# Test that the tokenization is identical on unicode of space type
UpperCAmelCase : Optional[Any] = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCAmelCase : Dict = tokenizer_s.tokenize(A )
UpperCAmelCase : List[Any] = tokenizer_r.tokenize(A )
self.assertListEqual(A , A )
# Test that the tokenization is identical on unicode of line break type
UpperCAmelCase : List[str] = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCAmelCase : Optional[Any] = tokenizer_s.tokenize(A )
UpperCAmelCase : int = tokenizer_r.tokenize(A )
self.assertListEqual(A , A )
def _lowercase( self ) -> Union[str, Any]:
# Verify that the returned character offsets correctly account for the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : str = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCAmelCase : Union[str, Any] = f'''{text_of_1_token} {text_of_1_token}'''
UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , )
UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , )
UpperCAmelCase : Union[str, Any] = f''' {text}'''
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , )
UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )) , )
def _lowercase( self ) -> Optional[Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
with self.assertRaises(A ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def _lowercase( self ) -> Any:
super().test_tokenization_python_rust_equals()
def _lowercase( self ) -> int:
# CLIP always lower cases letters
pass
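A compact restatement of the offset-mapping assertions above; the checkpoint is an assumption, and the expected spans rely on "hello" tokenizing to a single token, as the test itself assumes.

from transformers import CLIPTokenizerFast

tok = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
# Expected per the assertions above: token 0 spans chars [0, 5),
# token 1 spans [6, 11) -- the separating space belongs to neither token.
print(enc.offset_mapping)  # [(0, 5), (6, 11)]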
| 265 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
a : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
a : List[str] = 2_5_0_0_0_4
a : List[str] = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = MBartTokenizer
lowercase = MBartTokenizerFast
lowercase = True
lowercase = True
def _lowercase( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : str = MBartTokenizer(A , keep_accents=A )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase( self ) -> int:
UpperCAmelCase : Optional[Any] = MBartTokenizer(A , keep_accents=A )
UpperCAmelCase : Tuple = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(
A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# the two raw 2s ("9" and "é") are out of vocabulary, so each maps to unk: id 2 + fairseq offset 1 = 3
] , )
UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def _lowercase( self ) -> Union[str, Any]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase : Tuple = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : str = self.tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase : Dict = tokenizer_r.save_pretrained(A )
UpperCAmelCase : int = tokenizer_p.save_pretrained(A )
# Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
UpperCAmelCase : int = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
UpperCAmelCase : Optional[int] = tokenizer_r.from_pretrained(A )
UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase : Any = tokenizer_r.save_pretrained(A , legacy_format=A )
UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(A )
# Checks it saves with the same files
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
UpperCAmelCase : List[str] = tokenizer_r.from_pretrained(A )
UpperCAmelCase : Any = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase : Optional[Any] = tempfile.mkdtemp()
UpperCAmelCase : Optional[Any] = tokenizer_r.save_pretrained(A , legacy_format=A )
UpperCAmelCase : List[str] = tokenizer_p.save_pretrained(A )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(A )
UpperCAmelCase : str = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( unittest.TestCase ):
lowercase = 'facebook/mbart-large-en-ro'
lowercase = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
lowercase = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
lowercase = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def _lowercase( cls ) -> Tuple:
UpperCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
UpperCAmelCase : int = 1
return cls
def _lowercase( self ) -> Union[str, Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A )
def _lowercase( self ) -> List[str]:
self.assertIn(A , self.tokenizer.all_special_ids )
UpperCAmelCase : str = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
UpperCAmelCase : int = self.tokenizer.decode(A , skip_special_tokens=A )
UpperCAmelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A )
self.assertEqual(A , A )
self.assertNotIn(self.tokenizer.eos_token , A )
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : List[str] = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , A )
UpperCAmelCase : int = 10
UpperCAmelCase : List[Any] = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , A )
self.assertEqual(len(A ) , A )
def _lowercase( self ) -> Tuple:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250026, 250001] )
def _lowercase( self ) -> Dict:
UpperCAmelCase : Any = tempfile.mkdtemp()
UpperCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A )
UpperCAmelCase : Tuple = MBartTokenizer.from_pretrained(A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A )
@require_torch
def _lowercase( self ) -> List[str]:
UpperCAmelCase : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors="""pt""" )
UpperCAmelCase : Union[str, Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
UpperCAmelCase : Optional[int] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(A , A )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
UpperCAmelCase : str = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , A )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Optional[Any] = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors="""pt""" )
UpperCAmelCase : Dict = self.tokenizer(
text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors="""pt""" )
UpperCAmelCase : Dict = targets["""input_ids"""]
UpperCAmelCase : Union[str, Any] = shift_tokens_right(A , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : List[Any] = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(A ) , {
# A, test, EOS, en_XX
"""input_ids""": [[62, 3034, 2, 250004]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250001,
} , )
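The language-code protocol those tests pin down, in one hedged snippet (it downloads the real checkpoint): MBart appends [eos, src_lang_code] to encoder inputs and forces the target language code as the first decoder token.

from transformers import MBartTokenizer

tok = MBartTokenizer.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# suffix is [eos (2), en_XX (250004)], matching EN_CODE above
assert batch.input_ids[0, -2:].tolist() == [tok.eos_token_id, tok.lang_code_to_id["en_XX"]]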
| 265 | 1 |
'''simple docstring'''
class UpperCamelCase_ :
def __init__( self , A , A , A ) -> str:
UpperCAmelCase : Union[str, Any] = name
UpperCAmelCase : List[str] = value
UpperCAmelCase : List[Any] = weight
def __repr__( self ) -> Optional[int]:
return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
def _lowercase( self ) -> List[str]:
return self.value
def _lowercase( self ) -> Any:
return self.name
def _lowercase( self ) -> List[Any]:
return self.weight
def _lowercase( self ) -> Optional[Any]:
return self.value / self.weight
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> int:
UpperCAmelCase : List[Any] = []
for i in range(len(_lowercase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Any:
UpperCAmelCase : Optional[Any] = sorted(_lowercase , key=_lowercase , reverse=_lowercase )
UpperCAmelCase : List[Any] = []
UpperCAmelCase , UpperCAmelCase : Tuple = 0.0, 0.0
for i in range(len(_lowercase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __lowerCamelCase ( ) -> int:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
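A self-contained, runnable restatement of the greedy-by-ratio selection above, since the identifiers in this copy are obfuscated; the Item class and the menu literals are illustrative, not from the original module.

from dataclasses import dataclass

@dataclass
class Item:
    name: str
    value: float
    weight: float

    def ratio(self) -> float:
        return self.value / self.weight

def greedy(items, max_cost):
    # Take items in descending value/weight order while they still fit.
    chosen, total_value, total_cost = [], 0.0, 0.0
    for item in sorted(items, key=Item.ratio, reverse=True):
        if total_cost + item.weight <= max_cost:
            chosen.append(item)
            total_cost += item.weight
            total_value += item.value
    return chosen, total_value

menu = [Item("burger", 80, 40), Item("pizza", 100, 10), Item("cola", 60, 20)]
taken, total = greedy(menu, 60.0)
# pizza (ratio 10.0) and cola (3.0) fit; burger (2.0) would exceed the budget
assert [i.name for i in taken] == ["pizza", "cola"] and total == 160.0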
| 265 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
a : List[str] = get_tests_dir("""fixtures""")
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> int:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase : Tuple = mock.Mock()
UpperCAmelCase : List[str] = 500
UpperCAmelCase : Any = {}
UpperCAmelCase : List[str] = HTTPError
UpperCAmelCase : str = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=A ) as mock_head:
UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# This checks that we did call the fake head request
mock_head.assert_called()
def _lowercase( self ) -> Any:
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def _lowercase( self ) -> Union[str, Any]:
with self.assertRaises(A ):
# config is in subfolder, the following should not work without specifying the subfolder
UpperCAmelCase : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
self.assertIsNotNone(A )
@is_staging_test
class UpperCamelCase_ ( unittest.TestCase ):
@classmethod
def _lowercase( cls ) -> Dict:
UpperCAmelCase : Tuple = TOKEN
HfFolder.save_token(A )
@classmethod
def _lowercase( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A , repo_id="""test-image-processor""" , push_to_hub=A , use_auth_token=self._token )
UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : List[str] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=A , use_auth_token=self._token )
UpperCAmelCase : int = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
def _lowercase( self ) -> Optional[int]:
CustomImageProcessor.register_for_auto_class()
UpperCAmelCase : Optional[Any] = CustomImageProcessor.from_pretrained(A )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(
f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=A )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
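The two publish paths those staging tests exercise, side by side; the repo name, local paths, and token here are placeholders.

from transformers import ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("./fixtures")  # placeholder path

# Path 1: push directly
processor.push_to_hub("test-image-processor", use_auth_token="hf_xxx")

# Path 2: push as part of save_pretrained
processor.save_pretrained(
    "./tmp", repo_id="test-image-processor", push_to_hub=True, use_auth_token="hf_xxx"
)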
| 265 | 1 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : Optional[Any] = logging.get_logger(__name__)
def __lowerCamelCase ( _lowercase ) -> Any:
print("""Loading config file...""" )
def flatten_yaml_as_dict(_lowercase , _lowercase="" , _lowercase="." ):
UpperCAmelCase : Optional[Any] = []
for k, v in d.items():
UpperCAmelCase : List[str] = parent_key + sep + k if parent_key else k
if isinstance(_lowercase , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(_lowercase , _lowercase , sep=_lowercase ).items() )
else:
items.append((new_key, v) )
return dict(_lowercase )
UpperCAmelCase : List[str] = argparse.Namespace()
with open(_lowercase , """r""" ) as yaml_file:
try:
UpperCAmelCase : List[str] = yaml.load(_lowercase , Loader=yaml.FullLoader )
UpperCAmelCase : str = flatten_yaml_as_dict(_lowercase )
for k, v in flat_cfg.items():
setattr(_lowercase , _lowercase , _lowercase )
except yaml.YAMLError as exc:
logger.error("""Error while loading config file: {}. Error message: {}""".format(_lowercase , str(_lowercase ) ) )
return config
def __lowerCamelCase ( _lowercase , _lowercase ) -> Dict:
UpperCAmelCase : Any = MobileViTVaConfig()
UpperCAmelCase : int = False
# dataset
if task_name.startswith("""imagenet1k_""" ):
UpperCAmelCase : Tuple = 1_0_0_0
if int(task_name.strip().split("""_""" )[-1] ) == 3_8_4:
UpperCAmelCase : List[str] = 3_8_4
else:
UpperCAmelCase : str = 2_5_6
UpperCAmelCase : Optional[Any] = """imagenet-1k-id2label.json"""
elif task_name.startswith("""imagenet21k_to_1k_""" ):
UpperCAmelCase : int = 2_1_0_0_0
if int(task_name.strip().split("""_""" )[-1] ) == 3_8_4:
UpperCAmelCase : Any = 3_8_4
else:
UpperCAmelCase : int = 2_5_6
UpperCAmelCase : Optional[Any] = """imagenet-22k-id2label.json"""
elif task_name.startswith("""ade20k_""" ):
UpperCAmelCase : List[str] = 1_5_1
UpperCAmelCase : Dict = 5_1_2
UpperCAmelCase : List[Any] = """ade20k-id2label.json"""
UpperCAmelCase : Tuple = True
elif task_name.startswith("""voc_""" ):
UpperCAmelCase : Tuple = 2_1
UpperCAmelCase : Optional[int] = 5_1_2
UpperCAmelCase : Any = """pascal-voc-id2label.json"""
UpperCAmelCase : List[Any] = True
# orig_config
UpperCAmelCase : Any = load_orig_config_file(_lowercase )
assert getattr(_lowercase , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
UpperCAmelCase : str = getattr(_lowercase , """model.classification.mitv2.width_multiplier""" , 1.0 )
assert (
getattr(_lowercase , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
UpperCAmelCase : Dict = getattr(_lowercase , """model.classification.activation.name""" , """swish""" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
UpperCAmelCase : List[str] = getattr(_lowercase , """model.segmentation.output_stride""" , 1_6 )
if "_deeplabv3" in task_name:
UpperCAmelCase : List[Any] = getattr(_lowercase , """model.segmentation.deeplabv3.aspp_rates""" , [1_2, 2_4, 3_6] )
UpperCAmelCase : Union[str, Any] = getattr(_lowercase , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_1_2 )
UpperCAmelCase : Optional[int] = getattr(_lowercase , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
# id2label
UpperCAmelCase : Optional[int] = """huggingface/label-files"""
UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase : int = {int(_lowercase ): v for k, v in idalabel.items()}
UpperCAmelCase : int = idalabel
UpperCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
UpperCAmelCase : List[Any] = dct.pop(_lowercase )
UpperCAmelCase : Any = val
def __lowerCamelCase ( _lowercase , _lowercase=False ) -> Dict:
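# Collect (old_key, new_key) pairs mapping the original MobileViTv2 checkpoint names onto the Hugging Face module layout (conv stem, encoder layers, transformer blocks, heads).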
if base_model:
UpperCAmelCase : int = """"""
else:
UpperCAmelCase : Dict = """mobilevitv2."""
UpperCAmelCase : Optional[int] = []
for k in state_dict.keys():
if k[:8] == "encoder.":
UpperCAmelCase : Optional[Any] = k[8:]
else:
UpperCAmelCase : Optional[Any] = k
if ".block." in k:
UpperCAmelCase : List[Any] = k_new.replace(""".block.""" , """.""" )
if ".conv." in k:
UpperCAmelCase : Any = k_new.replace(""".conv.""" , """.convolution.""" )
if ".norm." in k:
UpperCAmelCase : List[str] = k_new.replace(""".norm.""" , """.normalization.""" )
if "conv_1." in k:
UpperCAmelCase : Optional[int] = k_new.replace("""conv_1.""" , F'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if F'''layer_{i}.''' in k:
UpperCAmelCase : int = k_new.replace(F'''layer_{i}.''' , F'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
UpperCAmelCase : str = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
if ".red_1x1." in k:
UpperCAmelCase : Dict = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
for i in [3, 4, 5]:
if F'''layer_{i}.0.''' in k:
UpperCAmelCase : str = k_new.replace(F'''layer_{i}.0.''' , F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if F'''layer_{i}.1.local_rep.0.''' in k:
UpperCAmelCase : Optional[Any] = k_new.replace(F'''layer_{i}.1.local_rep.0.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if F'''layer_{i}.1.local_rep.1.''' in k:
UpperCAmelCase : Any = k_new.replace(F'''layer_{i}.1.local_rep.1.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
UpperCAmelCase : List[Any] = [0, 1]
elif i == 4:
UpperCAmelCase : Optional[Any] = [0, 1, 2, 3]
elif i == 5:
UpperCAmelCase : Optional[int] = [0, 1, 2]
for j in j_in:
if F'''layer_{i}.1.global_rep.{j}.''' in k:
UpperCAmelCase : Dict = k_new.replace(
F'''layer_{i}.1.global_rep.{j}.''' , F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if F'''layer_{i}.1.global_rep.{j+1}.''' in k:
UpperCAmelCase : Union[str, Any] = k_new.replace(
F'''layer_{i}.1.global_rep.{j+1}.''' , F'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if F'''layer_{i}.1.conv_proj.''' in k:
UpperCAmelCase : Optional[int] = k_new.replace(F'''layer_{i}.1.conv_proj.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
UpperCAmelCase : Dict = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
if "pre_norm_attn.1." in k:
UpperCAmelCase : int = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
if "pre_norm_ffn.0." in k:
UpperCAmelCase : List[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
if "pre_norm_ffn.1." in k:
UpperCAmelCase : Tuple = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
if "pre_norm_ffn.3." in k:
UpperCAmelCase : str = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
if "classifier.1." in k:
UpperCAmelCase : str = k_new.replace("""classifier.1.""" , """classifier.""" )
if "seg_head." in k:
UpperCAmelCase : Tuple = k_new.replace("""seg_head.""" , """segmentation_head.""" )
if ".aspp_layer." in k:
UpperCAmelCase : Optional[int] = k_new.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in k:
UpperCAmelCase : int = k_new.replace(""".aspp_pool.""" , """.""" )
rename_keys.append((k, k_new) )
return rename_keys
def __lowerCamelCase ( _lowercase ) -> Dict:
UpperCAmelCase : Dict = []
for k in state_dict.keys():
if k.startswith("""seg_head.aux_head.""" ):
keys_to_ignore.append(_lowercase )
for k in keys_to_ignore:
state_dict.pop(_lowercase , _lowercase )
def __lowerCamelCase ( ) -> Dict:
UpperCAmelCase : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
UpperCAmelCase : Optional[int] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
UpperCAmelCase : List[Any] = get_mobilevitva_config(_lowercase , _lowercase )
# load original state_dict
UpperCAmelCase : Any = torch.load(_lowercase , map_location="""cpu""" )
# load huggingface model
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
UpperCAmelCase : Tuple = MobileViTVaForSemanticSegmentation(_lowercase ).eval()
UpperCAmelCase : Union[str, Any] = False
else:
UpperCAmelCase : Union[str, Any] = MobileViTVaForImageClassification(_lowercase ).eval()
UpperCAmelCase : Tuple = False
# remove and rename some keys of the loaded original model
UpperCAmelCase : List[str] = checkpoint
remove_unused_keys(_lowercase )
UpperCAmelCase : Union[str, Any] = create_rename_keys(_lowercase , base_model=_lowercase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(_lowercase , _lowercase , _lowercase )
# load modified state_dict
model.load_state_dict(_lowercase )
# Check outputs on an image, prepared by MobileViTImageProcessor
UpperCAmelCase : Dict = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 3_2 )
UpperCAmelCase : Any = image_processor(images=prepare_img() , return_tensors="""pt""" )
UpperCAmelCase : Optional[int] = model(**_lowercase )
# verify classification model
if task_name.startswith("""imagenet""" ):
UpperCAmelCase : Dict = outputs.logits
UpperCAmelCase : Union[str, Any] = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
UpperCAmelCase : Dict = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] )
assert torch.allclose(logits[0, :3] , _lowercase , atol=1e-4 )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""",
default="""imagenet1k_256""",
type=str,
help=(
"""Name of the task for which the MobileViTV2 model you'd like to convert is trained on . """
"""
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
"""
),
choices=[
"""imagenet1k_256""",
"""imagenet1k_384""",
"""imagenet21k_to_1k_256""",
"""imagenet21k_to_1k_384""",
"""ade20k_deeplabv3""",
"""voc_deeplabv3""",
],
)
parser.add_argument(
"""--orig_checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument("""--orig_config_path""", required=True, type=str, help="""Path to the original config file.""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
a : Optional[int] = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 265 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
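# create_tensor gives rank r the values [r*n + 1, ..., r*n + n] (n = num_processes), so every process contributes a distinct, recognizable slice to the collectives tested below.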
def __lowerCamelCase ( _lowercase ) -> Tuple:
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def __lowerCamelCase ( _lowercase ) -> List[Any]:
UpperCAmelCase : Any = create_tensor(_lowercase )
UpperCAmelCase : Union[str, Any] = gather(_lowercase )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def __lowerCamelCase ( _lowercase ) -> Optional[int]:
UpperCAmelCase : Any = [state.process_index]
UpperCAmelCase : Union[str, Any] = gather_object(_lowercase )
assert len(_lowercase ) == state.num_processes, F'''{gathered_obj}, {len(_lowercase )} != {state.num_processes}'''
assert gathered_obj == list(range(state.num_processes ) ), F'''{gathered_obj} != {list(range(state.num_processes ) )}'''
def __lowerCamelCase ( _lowercase ) -> List[Any]:
UpperCAmelCase : Optional[int] = create_tensor(_lowercase )
UpperCAmelCase : List[str] = broadcast(_lowercase )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def __lowerCamelCase ( _lowercase ) -> Tuple:
# We need to pad the tensor with one more element if we are the main process
# to ensure that we can pad
if state.is_main_process:
UpperCAmelCase : Optional[Any] = torch.arange(state.num_processes + 1 ).to(state.device )
else:
UpperCAmelCase : Tuple = torch.arange(state.num_processes ).to(state.device )
UpperCAmelCase : Optional[Any] = pad_across_processes(_lowercase )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def __lowerCamelCase ( _lowercase ) -> Dict:
# For now runs on only two processes
if state.num_processes != 2:
return
UpperCAmelCase : Optional[Any] = create_tensor(_lowercase )
UpperCAmelCase : Optional[Any] = reduce(_lowercase , """sum""" )
UpperCAmelCase : Optional[Any] = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(_lowercase , _lowercase ), F'''{reduced_tensor} != {truth_tensor}'''
def __lowerCamelCase ( _lowercase ) -> Optional[Any]:
# For now runs on only two processes
if state.num_processes != 2:
return
UpperCAmelCase : Tuple = create_tensor(_lowercase )
UpperCAmelCase : Optional[int] = reduce(_lowercase , """mean""" )
UpperCAmelCase : str = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(_lowercase , _lowercase ), F'''{reduced_tensor} != {truth_tensor}'''
def __lowerCamelCase ( _lowercase ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
def __lowerCamelCase ( ) -> int:
UpperCAmelCase : List[Any] = PartialState()
state.print(F'''State: {state}''' )
state.print("""testing gather""" )
test_gather(_lowercase )
state.print("""testing gather_object""" )
test_gather_object(_lowercase )
state.print("""testing broadcast""" )
test_broadcast(_lowercase )
state.print("""testing pad_across_processes""" )
test_pad_across_processes(_lowercase )
state.print("""testing reduce_sum""" )
test_reduce_sum(_lowercase )
state.print("""testing reduce_mean""" )
test_reduce_mean(_lowercase )
if __name__ == "__main__":
main()
| 265 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
a : Union[str, Any] = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
a : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 265 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
a : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , A , A , A , A , A , A , A , ) -> Optional[Any]:
super().__init__()
self.register_modules(
vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , )
def _lowercase( self , A = "auto" ) -> List[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def _lowercase( self ) -> Dict:
self.enable_attention_slicing(A )
@torch.no_grad()
def __call__( self , A , A = 512 , A = 512 , A = 50 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , A = None , **A , ) -> List[Any]:
if isinstance(A , A ):
UpperCAmelCase : List[str] = 1
elif isinstance(A , A ):
UpperCAmelCase : Dict = len(A )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(A )}.''' )
# get prompt text embeddings
UpperCAmelCase : List[str] = self.tokenizer(
A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
UpperCAmelCase : List[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCAmelCase : int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCAmelCase : Tuple = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
UpperCAmelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = text_embeddings.shape
UpperCAmelCase : List[str] = text_embeddings.repeat(1 , A , 1 )
UpperCAmelCase : List[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCAmelCase : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase : List[str]
if negative_prompt is None:
UpperCAmelCase : Any = [""""""]
elif type(A ) is not type(A ):
raise TypeError(
f'''`negative_prompt` should be the same type as `prompt`, but got {type(A )} !='''
f''' {type(A )}.''' )
elif isinstance(A , A ):
UpperCAmelCase : Optional[int] = [negative_prompt]
elif batch_size != len(A ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that the passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
UpperCAmelCase : Any = negative_prompt
UpperCAmelCase : Dict = text_input_ids.shape[-1]
UpperCAmelCase : List[Any] = self.tokenizer(
A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , )
UpperCAmelCase : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase : int = uncond_embeddings.shape[1]
UpperCAmelCase : List[Any] = uncond_embeddings.repeat(A , A , 1 )
UpperCAmelCase : List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCAmelCase : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCAmelCase : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
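# The second shape is a fixed 64x64 reference latent grid (the 512-pixel base resolution); reference latents are later cropped/offset to align other sizes with it.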
UpperCAmelCase : str = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCAmelCase : Dict = torch.randn(
A , generator=A , device="""cpu""" , dtype=A ).to(self.device )
UpperCAmelCase : int = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to(
self.device )
else:
UpperCAmelCase : int = torch.randn(
A , generator=A , device=self.device , dtype=A )
UpperCAmelCase : int = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
UpperCAmelCase : Optional[Any] = latents_reference.to(self.device )
UpperCAmelCase : Tuple = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
UpperCAmelCase : int = (latents_shape[3] - latents_shape_reference[3]) // 2
UpperCAmelCase : List[str] = (latents_shape[2] - latents_shape_reference[2]) // 2
UpperCAmelCase : Union[str, Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
UpperCAmelCase : Union[str, Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
UpperCAmelCase : Optional[int] = 0 if dx < 0 else dx
UpperCAmelCase : List[str] = 0 if dy < 0 else dy
UpperCAmelCase : Union[str, Any] = max(-dx , 0 )
UpperCAmelCase : List[Any] = max(-dy , 0 )
UpperCAmelCase : str = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCAmelCase : Union[str, Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase : Optional[int] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase : int = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase : Optional[Any] = {}
if accepts_eta:
UpperCAmelCase : List[str] = eta
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase : str = self.scheduler.scale_model_input(A , A )
# predict the noise residual
UpperCAmelCase : Any = self.unet(A , A , encoder_hidden_states=A ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCAmelCase , UpperCAmelCase : Any = noise_pred.chunk(2 )
UpperCAmelCase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase : Dict = self.scheduler.step(A , A , A , **A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A , A )
UpperCAmelCase : Union[str, Any] = 1 / 0.1_8_2_1_5 * latents
UpperCAmelCase : Tuple = self.vae.decode(A ).sample
UpperCAmelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
UpperCAmelCase : int = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to(
self.device )
UpperCAmelCase , UpperCAmelCase : int = self.safety_checker(
images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
UpperCAmelCase : Any = None
if output_type == "pil":
UpperCAmelCase : int = self.numpy_to_pil(A )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
| 265 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
a : int = None
a : int = logging.get_logger(__name__)
a : List[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
a : List[Any] = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
a : Tuple = {
"""facebook/nllb-large-en-ro""": 1_0_2_4,
"""facebook/nllb-200-distilled-600M""": 1_0_2_4,
}
# fmt: off
a : str = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class UpperCamelCase_ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = ['input_ids', 'attention_mask']
lowercase = NllbTokenizer
lowercase = []
lowercase = []
def __init__( self , A=None , A=None , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A=None , A=None , A=None , A=False , **A , ) -> Optional[int]:
# Mask token behaves like a normal word, i.e. it includes the space before it
UpperCAmelCase : Optional[Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
UpperCAmelCase : Union[str, Any] = legacy_behaviour
super().__init__(
vocab_file=A , tokenizer_file=A , bos_token=A , eos_token=A , sep_token=A , cls_token=A , unk_token=A , pad_token=A , mask_token=A , src_lang=A , tgt_lang=A , additional_special_tokens=A , legacy_behaviour=A , **A , )
UpperCAmelCase : List[str] = vocab_file
UpperCAmelCase : Union[str, Any] = False if not self.vocab_file else True
UpperCAmelCase : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
UpperCAmelCase : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(A ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCAmelCase : List[Any] = src_lang if src_lang is not None else """eng_Latn"""
UpperCAmelCase : str = self.convert_tokens_to_ids(self._src_lang )
UpperCAmelCase : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _lowercase( self ) -> str:
return self._src_lang
@src_lang.setter
def _lowercase( self , A ) -> None:
UpperCAmelCase : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _lowercase( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowercase( self , A , A = None ) -> List[int]:
UpperCAmelCase : Optional[int] = [self.sep_token_id]
UpperCAmelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase( self , A , A , A , A , **A ) -> Union[str, Any]:
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
UpperCAmelCase : List[str] = src_lang
UpperCAmelCase : Any = self(A , add_special_tokens=A , return_tensors=A , **A )
UpperCAmelCase : Any = self.convert_tokens_to_ids(A )
UpperCAmelCase : Optional[Any] = tgt_lang_id
return inputs
def _lowercase( self , A , A = "eng_Latn" , A = None , A = "fra_Latn" , **A , ) -> BatchEncoding:
UpperCAmelCase : Optional[Any] = src_lang
UpperCAmelCase : List[str] = tgt_lang
return super().prepare_seqaseq_batch(A , A , **A )
def _lowercase( self ) -> Tuple:
return self.set_src_lang_special_tokens(self.src_lang )
def _lowercase( self ) -> List[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowercase( self , A ) -> None:
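# Reset the special tokens for the source language: legacy behaviour uses no prefix and suffix=[eos, src_lang_code]; otherwise prefix=[src_lang_code] and suffix=[eos].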
UpperCAmelCase : str = self.convert_tokens_to_ids(A )
if self.legacy_behaviour:
UpperCAmelCase : Optional[int] = []
UpperCAmelCase : Tuple = [self.eos_token_id, self.cur_lang_code]
else:
UpperCAmelCase : Dict = [self.cur_lang_code]
UpperCAmelCase : Optional[Any] = [self.eos_token_id]
UpperCAmelCase : str = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase : Optional[int] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowercase( self , A ) -> None:
UpperCAmelCase : Optional[Any] = self.convert_tokens_to_ids(A )
if self.legacy_behaviour:
UpperCAmelCase : int = []
UpperCAmelCase : str = [self.eos_token_id, self.cur_lang_code]
else:
UpperCAmelCase : List[str] = [self.cur_lang_code]
UpperCAmelCase : List[Any] = [self.eos_token_id]
UpperCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase : Tuple = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase : str = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowercase( self , A , A = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
UpperCAmelCase : Union[str, Any] = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
| 265 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ :
def __init__( self , A , A=12 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=32 , A=2 , A=4 , A=37 , A=0.1 , A=0.1 , A=512 , A=0.0_2 , A=0 , A=None , ) -> Any:
UpperCAmelCase : Optional[Any] = parent
UpperCAmelCase : str = batch_size
UpperCAmelCase : Union[str, Any] = seq_length
UpperCAmelCase : Optional[Any] = is_training
UpperCAmelCase : int = use_input_mask
UpperCAmelCase : List[Any] = use_labels
UpperCAmelCase : Dict = vocab_size
UpperCAmelCase : str = hidden_size
UpperCAmelCase : List[Any] = projection_dim
UpperCAmelCase : Tuple = num_hidden_layers
UpperCAmelCase : Dict = num_attention_heads
UpperCAmelCase : Optional[Any] = intermediate_size
UpperCAmelCase : Any = dropout
UpperCAmelCase : List[Any] = attention_dropout
UpperCAmelCase : Optional[Any] = max_position_embeddings
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Optional[Any] = scope
UpperCAmelCase : Union[str, Any] = bos_token_id
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Union[str, Any] = None
if self.use_input_mask:
UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
UpperCAmelCase : Tuple = input_mask.numpy()
UpperCAmelCase , UpperCAmelCase : int = input_mask.shape
UpperCAmelCase : Optional[int] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
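# Turn each random mask into a contiguous run of ones followed by zeros, split at a per-row random index.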
for batch_idx, start_index in enumerate(A ):
UpperCAmelCase : Tuple = 1
UpperCAmelCase : Optional[Any] = 0
UpperCAmelCase : int = self.get_config()
return config, input_ids, tf.convert_to_tensor(A )
def _lowercase( self ) -> int:
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _lowercase( self , A , A , A ) -> Union[str, Any]:
UpperCAmelCase : int = TFBlipTextModel(config=A )
UpperCAmelCase : Union[str, Any] = model(A , attention_mask=A , training=A )
UpperCAmelCase : int = model(A , training=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Dict = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = config_and_inputs
UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = (TFBlipTextModel,) if is_tf_available() else ()
lowercase = False
lowercase = False
lowercase = False
def _lowercase( self ) -> int:
UpperCAmelCase : Union[str, Any] = BlipTextModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 )
def _lowercase( self ) -> Tuple:
self.config_tester.run_common_tests()
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> List[str]:
pass
def _lowercase( self ) -> Optional[int]:
pass
@unittest.skip(reason="""Blip does not use inputs_embeds""" )
def _lowercase( self ) -> Union[str, Any]:
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def _lowercase( self ) -> Optional[int]:
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def _lowercase( self ) -> Dict:
pass
@slow
def _lowercase( self ) -> Dict:
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Any = TFBlipTextModel.from_pretrained(A )
self.assertIsNotNone(A )
def _lowercase( self , A=True ) -> str:
super().test_pt_tf_model_equivalence(allow_missing_keys=A )
| 265 | 1 |
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> list[int]:
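# Preemptive shortest-job-first (shortest remaining time first): every time unit, run the arrived process with the least remaining burst time, preempting as needed.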
UpperCAmelCase : Optional[Any] = [0] * no_of_processes
UpperCAmelCase : str = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(_lowercase ):
UpperCAmelCase : Optional[Any] = burst_time[i]
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : Optional[int] = 0
UpperCAmelCase : int = 9_9_9_9_9_9_9_9_9
UpperCAmelCase : Optional[int] = 0
UpperCAmelCase : Optional[int] = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(_lowercase ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
UpperCAmelCase : Any = remaining_time[j]
UpperCAmelCase : int = j
UpperCAmelCase : Optional[int] = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
UpperCAmelCase : Optional[int] = remaining_time[short]
if minm == 0:
UpperCAmelCase : Optional[Any] = 9_9_9_9_9_9_9_9_9
if remaining_time[short] == 0:
complete += 1
UpperCAmelCase : Tuple = False
# Find finish time of current process
UpperCAmelCase : Tuple = increment_time + 1
# Calculate waiting time
UpperCAmelCase : int = finish_time - arrival_time[short]
UpperCAmelCase : Any = finar - burst_time[short]
if waiting_time[short] < 0:
UpperCAmelCase : Optional[int] = 0
# Increment time
increment_time += 1
return waiting_time
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> list[int]:
UpperCAmelCase : str = [0] * no_of_processes
for i in range(_lowercase ):
UpperCAmelCase : str = burst_time[i] + waiting_time[i]
return turn_around_time
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> None:
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : List[Any] = 0
for i in range(_lowercase ):
UpperCAmelCase : Optional[int] = total_waiting_time + waiting_time[i]
UpperCAmelCase : Union[str, Any] = total_turn_around_time + turn_around_time[i]
print(F'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''' )
print("""Average turn around time =""" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print("""Enter how many process you want to analyze""")
a : Optional[Any] = int(input())
a : Union[str, Any] = [0] * no_of_processes
a : int = [0] * no_of_processes
a : List[str] = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
a , a : Tuple = map(int, input().split())
a : Dict = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
a : Union[str, Any] = burst_time
a : Optional[Any] = no_of_processes
a : Tuple = waiting_time
a : Tuple = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
a : Optional[int] = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"""Process""",
"""BurstTime""",
"""ArrivalTime""",
"""WaitingTime""",
"""TurnAroundTime""",
],
)
# Printing the dataFrame
pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
print(fcfs)
| 265 |
'''simple docstring'''
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
a : str = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
a : int = """main"""
# Default branch name
a : Any = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"""
# One particular commit (not the top of `main`)
a : str = """aaaaaaa"""
# This commit does not exist, so we should 404.
a : int = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"""
# Sha-1 of config.json on the top of `main`, for checking purposes
a : Any = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"""
@contextlib.contextmanager
def __lowerCamelCase ( ) -> List[str]:
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def __lowerCamelCase ( ) -> Optional[int]:
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> List[Any]:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class UpperCamelCase_ ( unittest.TestCase ):
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def _lowercase( self , A ) -> Tuple:
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def _lowercase( self , A ) -> Dict:
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def _lowercase( self , A ) -> Union[str, Any]:
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
def _lowercase( self ) -> Optional[int]:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class UpperCamelCase_ ( __magic_name__ ):
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_tf
def _lowercase( self ) -> int:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class UpperCamelCase_ ( __magic_name__ ):
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_flax
def _lowercase( self ) -> Any:
# Flax models don't have labels
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
class UpperCamelCase_ ( __magic_name__ ):
pass
self.assertEqual(find_labels(A ) , [] )
| 265 | 1 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase_ ( __magic_name__ ):
lowercase = (UnCLIPScheduler,)
def _lowercase( self , **A ) -> Optional[int]:
UpperCAmelCase : int = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**A )
return config
def _lowercase( self ) -> List[Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=A )
def _lowercase( self ) -> List[str]:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=A )
def _lowercase( self ) -> Optional[int]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=A )
def _lowercase( self ) -> str:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=A )
def _lowercase( self ) -> Tuple:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=A )
def _lowercase( self ) -> Tuple:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=A , prev_timestep=A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : int = self.scheduler_classes[0]
UpperCAmelCase : Union[str, Any] = self.get_scheduler_config(variance_type="""fixed_small_log""" )
UpperCAmelCase : Tuple = scheduler_class(**A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_5_4_9_6_2_5 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_9_9_4_9_8_7 ) ) < 1e-5
def _lowercase( self ) -> Dict:
UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0]
UpperCAmelCase : Union[str, Any] = self.get_scheduler_config(variance_type="""learned_range""" )
UpperCAmelCase : Union[str, Any] = scheduler_class(**A )
UpperCAmelCase : Optional[Any] = 0.5
assert scheduler._get_variance(1 , predicted_variance=A ) - -1_0.1_7_1_2_7_9_0 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=A ) - -5.7_9_9_8_0_5_2 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=A ) - -0.0_0_1_0_0_1_1 < 1e-5
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Tuple = self.scheduler_classes[0]
UpperCAmelCase : Any = self.get_scheduler_config()
UpperCAmelCase : List[str] = scheduler_class(**A )
UpperCAmelCase : Union[str, Any] = scheduler.timesteps
UpperCAmelCase : str = self.dummy_model()
UpperCAmelCase : List[Any] = self.dummy_sample_deter
UpperCAmelCase : List[str] = torch.manual_seed(0 )
for i, t in enumerate(A ):
# 1. predict noise residual
UpperCAmelCase : Optional[Any] = model(A , A )
# 2. predict previous mean of sample x_t-1
UpperCAmelCase : List[Any] = scheduler.step(A , A , A , generator=A ).prev_sample
UpperCAmelCase : Any = pred_prev_sample
UpperCAmelCase : List[str] = torch.sum(torch.abs(A ) )
UpperCAmelCase : int = torch.mean(torch.abs(A ) )
assert abs(result_sum.item() - 2_5_2.2_6_8_2_4_9_5 ) < 1e-2
assert abs(result_mean.item() - 0.3_2_8_4_7_4_3 ) < 1e-3
def _lowercase( self ) -> str:
UpperCAmelCase : str = self.scheduler_classes[0]
UpperCAmelCase : Optional[int] = self.get_scheduler_config()
UpperCAmelCase : int = scheduler_class(**A )
scheduler.set_timesteps(25 )
UpperCAmelCase : Any = scheduler.timesteps
UpperCAmelCase : Union[str, Any] = self.dummy_model()
UpperCAmelCase : Optional[int] = self.dummy_sample_deter
UpperCAmelCase : Tuple = torch.manual_seed(0 )
for i, t in enumerate(A ):
# 1. predict noise residual
UpperCAmelCase : Tuple = model(A , A )
if i + 1 == timesteps.shape[0]:
UpperCAmelCase : Dict = None
else:
UpperCAmelCase : List[str] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
UpperCAmelCase : Optional[int] = scheduler.step(
A , A , A , prev_timestep=A , generator=A ).prev_sample
UpperCAmelCase : Union[str, Any] = pred_prev_sample
UpperCAmelCase : Union[str, Any] = torch.sum(torch.abs(A ) )
UpperCAmelCase : Any = torch.mean(torch.abs(A ) )
assert abs(result_sum.item() - 2_5_8.2_0_4_4_9_8_3 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_6_2_0_3_8 ) < 1e-3
def _lowercase( self ) -> List[str]:
pass
def _lowercase( self ) -> Union[str, Any]:
pass
| 265 |
'''simple docstring'''
from itertools import count
def __lowerCamelCase ( _lowercase = 5_0 ) -> int:
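# Running DP: fill_count_functions[n] counts the ways to fill a row of length n with blocks of at least min_block_length; the loop returns the first n where the count exceeds one million (cf. Project Euler 115).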
UpperCAmelCase : Any = [1] * min_block_length
for n in count(_lowercase ):
fill_count_functions.append(1 )
for block_length in range(_lowercase , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_0_0_0_0_0_0:
break
return n
if __name__ == "__main__":
print(F'''{solution() = }''')
| 265 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCamelCase_ ( unittest.TestCase ):
@slow
def _lowercase( self ) -> Any:
UpperCAmelCase : int = FlaxXLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
UpperCAmelCase : Any = AutoTokenizer.from_pretrained("""xlm-roberta-base""" )
UpperCAmelCase : List[str] = """The dog is cute and lives in the garden house"""
UpperCAmelCase : Union[str, Any] = jnp.array([tokenizer.encode(A )] )
UpperCAmelCase : Optional[Any] = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase : Dict = jnp.array(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
UpperCAmelCase : Union[str, Any] = model(A )["""last_hidden_state"""]
self.assertEqual(output.shape , A )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , A , atol=1e-3 ) )
| 265 |
'''simple docstring'''
from __future__ import annotations
import math
class UpperCamelCase_ :
def __init__( self , A ) -> None:
UpperCAmelCase : Optional[int] = size
# approximate the overall size of segment tree with given value
UpperCAmelCase : Optional[int] = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
UpperCAmelCase : Any = [0 for i in range(0 , 4 * size )]
UpperCAmelCase : Tuple = [0 for i in range(0 , 4 * size )] # flag for lazy update
def _lowercase( self , A ) -> int:
return idx * 2
def _lowercase( self , A ) -> int:
return idx * 2 + 1
def _lowercase( self , A , A , A , A ) -> None:
if left_element == right_element:
UpperCAmelCase : str = a[left_element - 1]
else:
UpperCAmelCase : Tuple = (left_element + right_element) // 2
self.build(self.left(A ) , A , A , A )
self.build(self.right(A ) , mid + 1 , A , A )
UpperCAmelCase : str = max(
self.segment_tree[self.left(A )] , self.segment_tree[self.right(A )] )
def _lowercase( self , A , A , A , A , A , A ) -> bool:
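# Range assignment with lazy propagation: flush any pending value to the children before recursing, then set val on the part of [left_element, right_element] covered by [a, b].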
if self.flag[idx] is True:
UpperCAmelCase : Optional[Any] = self.lazy[idx]
UpperCAmelCase : int = False
if left_element != right_element:
UpperCAmelCase : List[str] = self.lazy[idx]
UpperCAmelCase : Optional[Any] = self.lazy[idx]
UpperCAmelCase : List[str] = True
UpperCAmelCase : int = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
UpperCAmelCase : Optional[Any] = val
if left_element != right_element:
UpperCAmelCase : Tuple = val
UpperCAmelCase : int = val
UpperCAmelCase : Any = True
UpperCAmelCase : str = True
return True
UpperCAmelCase : str = (left_element + right_element) // 2
self.update(self.left(A ) , A , A , A , A , A )
self.update(self.right(A ) , mid + 1 , A , A , A , A )
UpperCAmelCase : List[str] = max(
self.segment_tree[self.left(A )] , self.segment_tree[self.right(A )] )
return True
def _lowercase( self , A , A , A , A , A ) -> int | float:
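# Range-maximum query over [a, b], pushing pending lazy assignments down along the visited path; disjoint segments contribute -inf.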
if self.flag[idx] is True:
UpperCAmelCase : Any = self.lazy[idx]
UpperCAmelCase : Any = False
if left_element != right_element:
UpperCAmelCase : Optional[Any] = self.lazy[idx]
UpperCAmelCase : Tuple = self.lazy[idx]
UpperCAmelCase : List[str] = True
UpperCAmelCase : Tuple = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
UpperCAmelCase : Dict = (left_element + right_element) // 2
UpperCAmelCase : List[Any] = self.query(self.left(A ) , A , A , A , A )
UpperCAmelCase : str = self.query(self.right(A ) , mid + 1 , A , A , A )
return max(A , A )
def __str__( self ) -> str:
        return str([self.query(1 , 1 , self.size , i , i ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
    A : Optional[int] = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8]
    size : Optional[Any] = 1_5
    segt : Union[str, Any] = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 1_1))
print(segt.query(1, 1, size, 7, 1_2))
segt.update(1, 1, size, 1, 3, 1_1_1)
print(segt.query(1, 1, size, 1, 1_5))
segt.update(1, 1, size, 7, 8, 2_3_5)
print(segt)
| 265 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
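# lazy import structure: the heavy torch-backed modules below are only imported on first attribute access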
_import_structure = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_nllb_moe"""] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 265 |
'''simple docstring'''
from PIL import Image
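# brighten an image by adding `level` to every pixel value; Image.point applies the mapping per pixel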
def change_brightness( img , level ) -> Image:
    def brightness( c ) -> float:
        return 1_2_8 + level + (c - 1_2_8)
    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
    return img.point(brightness )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
        brigt_img : Optional[Any] = change_brightness(img, 1_0_0)
brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 265 | 1 |
'''simple docstring'''
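# single-pass re-implementation of str.split for a one-character separator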
def split( string , separator = " " ) -> list:
    split_words = []
    last_index = 0
    for index, char in enumerate(string ):
        if char == separator:
            split_words.append(string[last_index:index] )
            last_index = index + 1
        elif index + 1 == len(string ):
            split_words.append(string[last_index : index + 1] )
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 265 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=64 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Optional[int]:
UpperCAmelCase : List[Any] = parent
UpperCAmelCase : Optional[int] = batch_size
UpperCAmelCase : Union[str, Any] = seq_length
UpperCAmelCase : Optional[Any] = is_training
UpperCAmelCase : Dict = use_input_mask
UpperCAmelCase : str = use_token_type_ids
UpperCAmelCase : List[Any] = use_labels
UpperCAmelCase : List[Any] = vocab_size
UpperCAmelCase : Dict = hidden_size
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : Optional[int] = num_attention_heads
UpperCAmelCase : int = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : List[str] = hidden_dropout_prob
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : str = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : List[str] = type_sequence_label_size
UpperCAmelCase : int = initializer_range
UpperCAmelCase : str = num_labels
UpperCAmelCase : Optional[int] = num_choices
UpperCAmelCase : Dict = scope
UpperCAmelCase : Union[str, Any] = vocab_size - 1
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Any = None
if self.use_input_mask:
UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : List[str] = None
if self.use_labels:
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Optional[int] = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowercase( self ) -> Optional[Any]:
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase : Any = True
return config, input_ids, input_mask, token_labels
def _lowercase( self , A , A , A ) -> int:
UpperCAmelCase : str = GPTNeoXModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[str] = model(A , attention_mask=A )
UpperCAmelCase : List[str] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A ) -> Optional[int]:
UpperCAmelCase : str = True
UpperCAmelCase : Optional[Any] = GPTNeoXModel(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A ) -> List[str]:
UpperCAmelCase : Tuple = GPTNeoXForCausalLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase : str = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A ) -> Tuple:
UpperCAmelCase : List[str] = self.num_labels
UpperCAmelCase : Any = GPTNeoXForQuestionAnswering(A )
model.to(A )
model.eval()
UpperCAmelCase : str = model(A , attention_mask=A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase( self , A , A , A , A ) -> int:
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : List[str] = GPTNeoXForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase( self , A , A , A , A ) -> str:
UpperCAmelCase : List[Any] = self.num_labels
UpperCAmelCase : Tuple = GPTNeoXForTokenClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase( self , A , A , A ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : str = GPTNeoXForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
UpperCAmelCase : List[str] = model(A , attention_mask=A , use_cache=A )
UpperCAmelCase : List[Any] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids
UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : Any = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase : Dict = model(A , attention_mask=A , output_hidden_states=A )
UpperCAmelCase : Any = output_from_no_past["""hidden_states"""][0]
UpperCAmelCase : List[str] = model(
A , attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0]
# select random slice
UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase : List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs
UpperCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : str = GPTNeoXModelTester(self )
UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=A , hidden_size=64 , num_attention_heads=8 )
def _lowercase( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(A , A , A )
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(A , A , A )
def _lowercase( self ) -> Optional[Any]:
# This regression test was failing with PyTorch < 1.3
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCAmelCase : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(A , A , A )
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(A , A , A )
def _lowercase( self ) -> int:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*A )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
def _lowercase( self ) -> Any:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def _lowercase( self ) -> Optional[int]:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowercase( self , A ) -> str:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : int = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
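        # the long input is 1.5x max_position_embeddings, i.e. past the trained context window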
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Dict = GPTNeoXModel(A )
original_model.to(A )
original_model.eval()
UpperCAmelCase : List[str] = original_model(A ).last_hidden_state
UpperCAmelCase : Any = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Any = {"""type""": scaling_type, """factor""": 1_0.0}
UpperCAmelCase : str = GPTNeoXModel(A )
scaled_model.to(A )
scaled_model.eval()
UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state
UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
@slow
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : str = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
UpperCAmelCase : int = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(A )
UpperCAmelCase : List[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(A )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
UpperCAmelCase : List[str] = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
UpperCAmelCase : Union[str, Any] = model.generate(**A , do_sample=A , max_new_tokens=20 )
UpperCAmelCase : Tuple = tokenizer.batch_decode(A )[0]
self.assertEqual(A , A )
| 265 | 1 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowercase_ ( lowercase ):
'''simple docstring'''
def __init__( self : Optional[Any] , __UpperCAmelCase : Callable , __UpperCAmelCase : Optional[Features] = None , __UpperCAmelCase : str = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[dict] = None , __UpperCAmelCase : Optional[int] = None , **__UpperCAmelCase : Dict , ) ->List[Any]:
"""simple docstring"""
super().__init__(
features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase , streaming=__UpperCAmelCase , num_proc=__UpperCAmelCase , **__UpperCAmelCase , )
a = Generator(
cache_dir=__UpperCAmelCase , features=__UpperCAmelCase , generator=__UpperCAmelCase , gen_kwargs=__UpperCAmelCase , **__UpperCAmelCase , )
def __lowerCAmelCase ( self : List[Any] ) ->Tuple:
"""simple docstring"""
if self.streaming:
a = self.builder.as_streaming_dataset(split='''train''' )
# Build regular (map-style) dataset
else:
a = None
a = None
a = None
a = None
self.builder.download_and_prepare(
download_config=__UpperCAmelCase , download_mode=__UpperCAmelCase , verification_mode=__UpperCAmelCase , base_path=__UpperCAmelCase , num_proc=self.num_proc , )
a = self.builder.as_dataset(
split='''train''' , verification_mode=__UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
| 0 |
'''simple docstring'''
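# divide-and-conquer exponentiation: a**b = a**(b//2) * a**(b//2), with one extra factor of a when b is odd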
def actual_power( a , b ) -> int:
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
    else:
        return a * actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
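# public wrapper: a negative exponent maps to the reciprocal of the positive power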
def power( a , b ) -> float:
    if b < 0:
        return 1 / actual_power(a , b )
    return actual_power(a , b )
if __name__ == "__main__":
print(power(-2, -3))
| 265 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __A ( metaclass=UpperCamelCase__ ):
a__ : List[str] = ["""onnx"""]
def __init__(self : List[Any] , *__a : Dict , **__a : Optional[Any] ):
requires_backends(self , ["onnx"] )
@classmethod
def _lowercase (cls : List[str] , *__a : Any , **__a : List[Any] ):
requires_backends(cls , ["onnx"] )
@classmethod
def _lowercase (cls : Optional[int] , *__a : Any , **__a : int ):
requires_backends(cls , ["onnx"] )
| 1 |
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a : Any = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = AlbertTokenizer
lowercase = AlbertTokenizerFast
lowercase = True
lowercase = True
lowercase = True
def _lowercase( self ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : Optional[int] = AlbertTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase( self , A ) -> int:
UpperCAmelCase : Optional[int] = """this is a test"""
UpperCAmelCase : Dict = """this is a test"""
return input_text, output_text
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = """<pad>"""
UpperCAmelCase : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def _lowercase( self ) -> Any:
UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(A ) , 30000 )
def _lowercase( self ) -> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _lowercase( self ) -> Union[str, Any]:
if not self.test_rust_tokenizer:
return
UpperCAmelCase : int = self.get_tokenizer()
UpperCAmelCase : List[str] = self.get_rust_tokenizer()
UpperCAmelCase : Optional[Any] = """I was born in 92000, and this is falsé."""
UpperCAmelCase : str = tokenizer.tokenize(A )
UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(A )
self.assertListEqual(A , A )
UpperCAmelCase : Any = tokenizer.encode(A , add_special_tokens=A )
UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase : Union[str, Any] = tokenizer.encode(A )
UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A )
self.assertListEqual(A , A )
def _lowercase( self ) -> Any:
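        # keep_accents=True should tokenize accented characters instead of stripping them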
UpperCAmelCase : List[Any] = AlbertTokenizer(A , keep_accents=A )
UpperCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [48, 25, 21, 1289] )
UpperCAmelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(A , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : str = AlbertTokenizer(A )
UpperCAmelCase : Optional[int] = tokenizer.encode("""sequence builders""" )
UpperCAmelCase : Any = tokenizer.encode("""multi-sequence build""" )
UpperCAmelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A )
UpperCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(A , A )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def _lowercase( self ) -> Dict:
# fmt: off
UpperCAmelCase : Tuple = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
| 265 | 0 |
'''simple docstring'''
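# composite trapezoidal rule: the integral over [a, b] is approximated by h * (f(a)/2 + sum of interior f(x_i) + f(b)/2)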
def method_1(boundary , steps ) -> float:
    """simple docstring"""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a , b , h )
    y = 0.0
    y += (h / 2.0) * f(a )
    for i in x_i:
        # print(i)
        y += h * f(i )
    y += (h / 2.0) * f(b )
    return y
def make_points(a , b , h ):
    """simple docstring"""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f(x ) -> float:  # enter your function here
    """simple docstring"""
    y = (x - 0) * (x - 0)
    return y
def main() -> None:
    """simple docstring"""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary , steps )
    print(f"y = {y}" )
if __name__ == "__main__":
main()
| 2 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = StableDiffusionDiffEditPipeline
lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
lowercase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase = frozenset([] )
def _lowercase( self ) -> Optional[int]:
torch.manual_seed(0 )
UpperCAmelCase : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=A , )
UpperCAmelCase : int = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_one=A , )
UpperCAmelCase : List[Any] = DDIMInverseScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_zero=A , )
torch.manual_seed(0 )
UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
UpperCAmelCase : Optional[Any] = CLIPTextModel(A )
UpperCAmelCase : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCAmelCase : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _lowercase( self , A , A=0 ) -> Optional[Any]:
UpperCAmelCase : Any = floats_tensor((1, 16, 16) , rng=random.Random(A ) ).to(A )
UpperCAmelCase : Any = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(A ) ).to(A )
if str(A ).startswith("""mps""" ):
UpperCAmelCase : List[Any] = torch.manual_seed(A )
else:
UpperCAmelCase : List[Any] = torch.Generator(device=A ).manual_seed(A )
UpperCAmelCase : int = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _lowercase( self , A , A=0 ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A )
UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" )
if str(A ).startswith("""mps""" ):
UpperCAmelCase : Optional[int] = torch.manual_seed(A )
else:
UpperCAmelCase : Optional[Any] = torch.Generator(device=A ).manual_seed(A )
UpperCAmelCase : Any = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _lowercase( self , A , A=0 ) -> str:
UpperCAmelCase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A )
UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" )
if str(A ).startswith("""mps""" ):
UpperCAmelCase : Optional[int] = torch.manual_seed(A )
else:
UpperCAmelCase : Optional[int] = torch.Generator(device=A ).manual_seed(A )
UpperCAmelCase : str = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def _lowercase( self ) -> List[Any]:
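        # optional components set to None must survive a save_pretrained / from_pretrained round trip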
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
UpperCAmelCase : Dict = self.get_dummy_components()
UpperCAmelCase : int = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(A , A , A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
UpperCAmelCase : Any = self.get_dummy_inputs(A )
UpperCAmelCase : Optional[Any] = pipe(**A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(A )
UpperCAmelCase : Dict = self.pipeline_class.from_pretrained(A )
pipe_loaded.to(A )
pipe_loaded.set_progress_bar_config(disable=A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(A , A ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(A )
UpperCAmelCase : Tuple = pipe_loaded(**A )[0]
UpperCAmelCase : Optional[Any] = np.abs(output - output_loaded ).max()
self.assertLess(A , 1e-4 )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Optional[int] = """cpu"""
UpperCAmelCase : Optional[Any] = self.get_dummy_components()
UpperCAmelCase : List[str] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Union[str, Any] = self.get_dummy_mask_inputs(A )
UpperCAmelCase : List[Any] = pipe.generate_mask(**A )
UpperCAmelCase : Dict = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
UpperCAmelCase : Optional[int] = np.array([0] * 9 )
UpperCAmelCase : Union[str, Any] = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = """cpu"""
UpperCAmelCase : List[str] = self.get_dummy_components()
UpperCAmelCase : Optional[Any] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Optional[int] = self.get_dummy_inversion_inputs(A )
UpperCAmelCase : List[str] = pipe.invert(**A ).images
UpperCAmelCase : Union[str, Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase : Dict = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , )
UpperCAmelCase : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1e-3 )
def _lowercase( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def _lowercase( self ) -> int:
UpperCAmelCase : List[Any] = """cpu"""
UpperCAmelCase : int = self.get_dummy_components()
UpperCAmelCase : List[Any] = {"""beta_start""": 0.0_0_0_8_5, """beta_end""": 0.0_1_2, """beta_schedule""": """scaled_linear"""}
UpperCAmelCase : int = DPMSolverMultistepScheduler(**A )
UpperCAmelCase : int = DPMSolverMultistepInverseScheduler(**A )
UpperCAmelCase : List[str] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Tuple = self.get_dummy_inversion_inputs(A )
UpperCAmelCase : Any = pipe.invert(**A ).images
UpperCAmelCase : Dict = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase : Any = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , )
UpperCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1e-3 )
@require_torch_gpu
@slow
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def _lowercase( cls ) -> Dict:
UpperCAmelCase : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
UpperCAmelCase : Union[str, Any] = raw_image.convert("""RGB""" ).resize((768, 768) )
UpperCAmelCase : List[str] = raw_image
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Dict = torch.manual_seed(0 )
UpperCAmelCase : int = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa )
UpperCAmelCase : Optional[int] = DDIMScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Tuple = """a bowl of fruit"""
UpperCAmelCase : List[Any] = """a bowl of pears"""
UpperCAmelCase : str = pipe.generate_mask(
image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , )
UpperCAmelCase : Tuple = pipe.invert(
prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A ).latents
UpperCAmelCase : Any = pipe(
prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
UpperCAmelCase : List[str] = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase : Union[str, Any] = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa )
UpperCAmelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase : Any = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : int = """a bowl of fruit"""
UpperCAmelCase : int = """a bowl of pears"""
UpperCAmelCase : str = pipe.generate_mask(
image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , )
UpperCAmelCase : Any = pipe.invert(
prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A , num_inference_steps=25 , ).latents
UpperCAmelCase : str = pipe(
prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
UpperCAmelCase : Tuple = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 265 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowercase : int = logging.get_logger(__name__)
lowercase : Tuple = {
'post_extract_proj': 'feature_projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.upsample.0': 'encoder.upsample.projection',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
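    # walk the dotted key path down to the target submodule / parameter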
for attribute in key.split('''.''' ):
A : int = getattr(snake_case__ , snake_case__ )
if weight_type is not None:
A : Optional[int] = getattr(snake_case__ , snake_case__ ).shape
else:
A : Union[str, Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
A : Optional[int] = value
elif weight_type == "weight_g":
A : str = value
elif weight_type == "weight_v":
A : str = value
elif weight_type == "bias":
A : List[str] = value
else:
A : Any = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : List[Any] = []
A : Optional[int] = fairseq_model.state_dict()
A : Optional[Any] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
A : Tuple = False
if "conv_layers" in name:
load_conv_layer(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == '''group''' , )
A : str = True
else:
for key, mapped_key in MAPPING.items():
A : str = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
A : Union[str, Any] = True
if "*" in mapped_key:
A : List[Any] = name.split(snake_case__ )[0].split('''.''' )[-2]
A : Optional[int] = mapped_key.replace('''*''' , snake_case__ )
if "weight_g" in name:
A : List[str] = '''weight_g'''
elif "weight_v" in name:
A : Union[str, Any] = '''weight_v'''
elif "weight" in name:
A : Dict = '''weight'''
elif "bias" in name:
A : Dict = '''bias'''
else:
A : Optional[Any] = None
set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
continue
if not is_used:
unused_weights.append(snake_case__ )
logger.warning(F'Unused weights: {unused_weights}' )
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : Union[str, Any] = full_name.split('''conv_layers.''' )[-1]
A : List[Any] = name.split('''.''' )
A : Union[str, Any] = int(items[0] )
A : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
A : Optional[Any] = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
A : Optional[int] = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
A : int = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
A : int = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(snake_case__ )
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : Dict = SEWConfig()
if is_finetuned:
A : List[Any] = model.wav_encoder.wav_model.cfg
else:
A : str = model.cfg
A : int = fs_config.conv_bias
A : Optional[int] = eval(fs_config.conv_feature_layers )
A : Union[str, Any] = [x[0] for x in conv_layers]
A : Dict = [x[1] for x in conv_layers]
A : Dict = [x[2] for x in conv_layers]
A : str = '''gelu'''
A : Tuple = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group'''
A : Optional[Any] = 0.0
A : List[str] = fs_config.activation_fn.name
A : List[str] = fs_config.encoder_embed_dim
A : Optional[Any] = 0.02
A : Any = fs_config.encoder_ffn_embed_dim
A : str = 1E-5
A : str = fs_config.encoder_layerdrop
A : List[str] = fs_config.encoder_attention_heads
A : List[str] = fs_config.conv_pos_groups
A : str = fs_config.conv_pos
A : Union[str, Any] = len(snake_case__ )
A : str = fs_config.encoder_layers
A : List[Any] = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
A : Optional[int] = model.cfg
A : Union[str, Any] = fs_config.final_dropout
A : Optional[int] = fs_config.layerdrop
A : str = fs_config.activation_dropout
A : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
A : Dict = fs_config.attention_dropout
A : List[str] = fs_config.dropout_input
A : Union[str, Any] = fs_config.dropout
A : int = fs_config.mask_channel_length
A : str = fs_config.mask_channel_prob
A : List[str] = fs_config.mask_length
A : Tuple = fs_config.mask_prob
A : Optional[int] = '''Wav2Vec2FeatureExtractor'''
A : Union[str, Any] = '''Wav2Vec2CTCTokenizer'''
return config
@torch.no_grad()
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=True ):
'''simple docstring'''
if is_finetuned:
A, A, A : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
A, A, A : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
A : List[str] = SEWConfig.from_pretrained(snake_case__ )
else:
A : Dict = convert_config(model[0] , snake_case__ )
A : str = model[0].eval()
A : Union[str, Any] = True if config.feat_extract_norm == '''layer''' else False
A : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , )
if is_finetuned:
if dict_path:
A : Optional[Any] = Dictionary.load(snake_case__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
A : Optional[Any] = target_dict.pad_index
A : Optional[Any] = target_dict.bos_index
A : Union[str, Any] = target_dict.pad_index
A : Union[str, Any] = target_dict.bos_index
A : int = target_dict.eos_index
A : Tuple = len(target_dict.symbols )
A : List[str] = os.path.join(snake_case__ , '''vocab.json''' )
if not os.path.isdir(snake_case__ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(snake_case__ ) )
return
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , snake_case__ )
A : str = WavaVecaCTCTokenizer(
snake_case__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=snake_case__ , )
A : List[str] = WavaVecaProcessor(feature_extractor=snake_case__ , tokenizer=snake_case__ )
processor.save_pretrained(snake_case__ )
A : Tuple = SEWForCTC(snake_case__ )
else:
A : Dict = SEWModel(snake_case__ )
feature_extractor.save_pretrained(snake_case__ )
recursively_load_weights(snake_case__ , snake_case__ , snake_case__ )
hf_model.save_pretrained(snake_case__ )
if __name__ == "__main__":
lowercase : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowercase : Any = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 3 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def __lowerCamelCase ( _lowercase ) -> Union[str, Any]:
UpperCAmelCase : Dict = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(_lowercase , _lowercase )
def __lowerCamelCase ( _lowercase ) -> Tuple:
UpperCAmelCase , UpperCAmelCase : List[str] = emb.weight.shape
UpperCAmelCase : Dict = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
UpperCAmelCase : Optional[Any] = emb.weight.data
return lin_layer
def __lowerCamelCase ( _lowercase , _lowercase=None ) -> List[Any]:
UpperCAmelCase : Dict = {}
for old_key in state_dict.keys():
UpperCAmelCase : str = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
UpperCAmelCase : int = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' )
else:
UpperCAmelCase : str = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" )
if "gate" in key:
UpperCAmelCase : str = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" )
if "fc2" and "experts" not in key:
UpperCAmelCase : Tuple = key.replace(""".fc2.""" , """.ffn.fc2.""" )
if "fc1" and "experts" not in key:
UpperCAmelCase : List[Any] = key.replace(""".fc1.""" , """.ffn.fc1.""" )
if ".encoder_attn." in key:
UpperCAmelCase : Union[str, Any] = key.replace(""".encoder_attn.""" , """.cross_attention.""" )
if "encoder_attn_layer_norm" in key:
UpperCAmelCase : str = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" )
if "final_layer_norm" in key:
UpperCAmelCase : Optional[int] = key.replace("""final_layer_norm""" , """ff_layer_norm""" )
UpperCAmelCase : str = state_dict[old_key]
return new_dict
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = WEIGHTS_NAME ) -> Tuple:
UpperCAmelCase : Any = []
UpperCAmelCase : Dict = 0
os.makedirs(_lowercase , exist_ok=_lowercase )
for expert in range(_lowercase ):
UpperCAmelCase : str = switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(_lowercase ):
UpperCAmelCase : Optional[Any] = torch.load(_lowercase )["""model"""]
remove_ignore_keys_(_lowercase )
UpperCAmelCase : List[str] = rename_fairseq_keys(_lowercase , _lowercase )
UpperCAmelCase : Optional[Any] = os.path.join(
_lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase )+1:05d}-of-???.bin''' ) )
torch.save(_lowercase , _lowercase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_lowercase )[0]].dtype )
# Add the last block
UpperCAmelCase : Optional[Any] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase )+1:05d}-of-???.bin''' ) )
UpperCAmelCase : Optional[int] = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
remove_ignore_keys_(_lowercase )
UpperCAmelCase : Optional[Any] = rename_fairseq_keys(_lowercase , _lowercase )
UpperCAmelCase : int = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_lowercase ) == 1:
UpperCAmelCase : Dict = os.path.join(_lowercase , _lowercase )
torch.save(_lowercase , _lowercase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_lowercase , _lowercase )
# Otherwise, let's build the index
UpperCAmelCase : Optional[int] = {}
for idx, shard in enumerate(_lowercase ):
UpperCAmelCase : List[str] = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_lowercase ):05d}.bin''' )
UpperCAmelCase : List[str] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(_lowercase , os.path.join(_lowercase , _lowercase ) )
for key in shard:
UpperCAmelCase : Tuple = shard_file
# Add the metadata
UpperCAmelCase : Any = {"""total_size""": total_size}
UpperCAmelCase : Optional[Any] = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(_lowercase , _lowercase ) , """w""" , encoding="""utf-8""" ) as f:
UpperCAmelCase : str = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + """\n"""
f.write(_lowercase )
return metadata, index
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
a : int = parser.parse_args()
a , a : Any = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_2_8,
args.dtype,
)
a : str = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8
)
config.save_pretrained(args.pytorch_dump_folder_path)
a : Optional[int] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
| 265 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class UpperCAmelCase_ ( unittest.TestCase ):
def __init__( self : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any]=1_3 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : int=9_9 , UpperCAmelCase__ : List[str]=3_2 , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Tuple=3_7 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : int=5_1_2 , UpperCAmelCase__ : int=1_6 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : Any=4 , ) -> int:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_attention_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_choices
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_attention_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCAmelCase ( self : Any ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self : str ) -> str:
        self.model_tester = FlaxRoFormerModelTester(self )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
        model = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        vocab_size = 5_0_0_0_0
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = jnp.array(
            [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 4 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : List[Any] = logging.get_logger(__name__)
a : Union[str, Any] = torch.device("""cpu""")
def prepare_img ( ) -> Any:
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def get_expected_output ( swiftformer_name ) -> Dict:
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02] )
def rename_key ( dct , old , new ) -> str:
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys ( state_dict ) -> List[str]:
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(""".pwconv""" , """.point_wise_conv""" )
        if ".dwconv" in k:
            k_new = k_new.replace(""".dwconv""" , """.depth_wise_conv""" )
        if ".Proj." in k:
            k_new = k_new.replace(""".Proj.""" , """.proj.""" )
        if "patch_embed" in k_new:
            k_new = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" )
        if "network" in k_new:
            ls = k_new.split(""".""" )
            if ls[2].isdigit():
                k_new = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] )
            else:
                k_new = k_new.replace("""network""" , """swiftformer.encoder.network""" )
rename_keys.append((k, k_new) )
return rename_keys
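# Illustrative example (an assumption based on the rules above, not taken from the
# original script): a checkpoint key such as "network.0.1.pwconv1.weight" would be
# mapped to "swiftformer.encoder.network.0.blocks.1.point_wise_conv1.weight".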
@torch.no_grad()
def convert_swiftformer_checkpoint ( swiftformer_name , pytorch_dump_folder_path , original_ckpt ) -> Optional[int]:
    config = SwiftFormerConfig()
    # the checkpoints are image classifiers fine-tuned on ImageNet-1k, so set up the 1000-class label mapping
    config.num_labels = 1_0_0_0
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [4_8, 5_6, 1_1_2, 2_2_0]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [4_8, 6_4, 1_6_8, 2_2_4]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 1_0, 5]
        config.embed_dims = [4_8, 9_6, 1_9_2, 3_8_4]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 1_2, 6]
        config.embed_dims = [6_4, 1_2_8, 3_2_0, 5_1_2]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("""https""" ):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt , map_location="""cpu""" , check_hash=True )
        else:
            checkpoint = torch.load(original_ckpt , map_location="""cpu""" )
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config ).eval()
    hf_model.load_state_dict(state_dict )
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("""preprocessor_config""" )
    inputs = processor(images=image , return_tensors="""pt""" )
    # compare outputs from both models
    expected_output = get_expected_output(swiftformer_name )
    hf_logits = hf_model(inputs["""pixel_values"""] ).logits
    assert hf_logits.shape == torch.Size([1, 1_0_0_0] )
    assert torch.allclose(hf_logits[0, 0:5] , expected_output , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 265 | 0 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester :
    def __init__(self , parent , out_indices=None , stage_names=None , out_features=None , backbone="resnet50" , batch_size=3 , image_size=3_2 , num_channels=3 , use_pretrained_backbone=True , is_training=True , ) -> str:
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
def __A (self ) -> Tuple:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
return config, pixel_values
def __A (self ) -> Optional[Any]:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def __A (self , config , pixel_values ) -> int:
        model = TimmBackbone(config=config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            result = model(pixel_values )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , )
def __A (self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowerCamelCase__ ( ModelTesterMixin , BackboneTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self ) -> Optional[int]:
        self.model_tester = TimmBackboneModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TimmBackboneConfig , has_text_modality=False )
def __A (self ) -> Tuple:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __A (self ) -> Optional[Any]:
        timm_checkpoint = '''resnet18'''
        transformers_checkpoint = '''microsoft/resnet-18'''
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
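        # Illustrative example (an assumption, not part of the original test): for a
        # resnet backbone with 5 stages this means the timm model reports
        # out_indices == (-1,) while the transformers port reports out_indices == [4],
        # both of which refer to the same final stage.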
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True , out_indices=[1, 2, 3] )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def __A (self ) -> Optional[Any]:
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def __A (self ) -> Tuple:
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def __A (self ) -> List[Any]:
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def __A (self ) -> Tuple:
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def __A (self ) -> Any:
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def __A (self ) -> Any:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __A (self ) -> Optional[Any]:
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def __A (self ) -> Tuple:
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def __A (self ) -> int:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __A (self ) -> Tuple:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __A (self ) -> Tuple:
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def __A (self ) -> int:
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def __A (self ) -> List[str]:
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def __A (self ) -> Dict:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A (self ) -> List[Any]:
pass
def __A (self ) -> str:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def __A (self ) -> Union[str, Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config )
        model.to(torch_device )
        inputs = self._prepare_for_class(inputs_dict , model_class )
        outputs = model(**inputs )
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True )
        self.assertIsNotNone(hidden_states.grad )
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad )
def __A (self ) -> str:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
            self.assertEqual(len(model.channels ) , len(config.out_indices ) )
            # Check output of last stage is taken if out_features=None, out_indices=None
            config = copy.deepcopy(config )
            config.out_indices = None
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , 1 )
            self.assertEqual(len(model.channels ) , 1 )
            # Check backbone can be initialized with fresh weights
            config = copy.deepcopy(config )
            config.use_pretrained_backbone = False
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
| 5 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
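# Context (added; not part of the original test file): find_executable_batch_size
# retries the decorated function, halving the batch size after each CUDA OOM error.
# A minimal sketch of that idea, under the assumption that OOM surfaces as a
# RuntimeError containing "CUDA out of memory." -- this is NOT accelerate's actual
# implementation, just an illustration of the behaviour the tests below exercise.
def _sketch_find_executable_batch_size(function=None, starting_batch_size=128):
    def decorator(func):
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while True:
                if batch_size == 0:
                    raise RuntimeError("No executable batch size found, reached zero.")
                try:
                    return func(batch_size, *args, **kwargs)
                except RuntimeError as e:
                    if "CUDA out of memory." in str(e):
                        batch_size //= 2  # halve and retry, as the tests expect
                    else:
                        raise
        return wrapper
    return decorator if function is None else decorator(function)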
def raise_fake_out_of_memory ( ) -> Any:
raise RuntimeError("""CUDA out of memory.""" )
class ModelForTest( nn.Module ):
    def __init__( self ) -> Any:
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward( self , A ) -> Any:
        return self.linear2(self.batchnorm(self.linear1(A ) ) )
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = []
@find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(A , [128, 64, 32, 16, 8] )
def _lowercase( self ) -> Any:
UpperCAmelCase : Optional[Any] = []
@find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size , arga ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
        bs , arga = mock_training_loop_function("""hello""" )
self.assertListEqual(A , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, """hello"""] )
def _lowercase( self ) -> Any:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(A ):
pass
with self.assertRaises(A ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
def _lowercase( self ) -> Optional[int]:
@find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(A ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
def _lowercase( self ) -> Optional[Any]:
@find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size , arga , argb ):
            if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(A ) as cm:
mock_training_loop_function(128 , """hello""" , """world""" )
self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0] )
self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0] )
def _lowercase( self ) -> int:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(A ):
raise ValueError("""Oops, we had an error!""" )
with self.assertRaises(A ) as cm:
mock_training_loop_function()
self.assertIn("""Oops, we had an error!""" , cm.exception.args[0] )
@require_cuda
def _lowercase( self ) -> Optional[int]:
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
        model = release_memory(model )
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory )
| 265 | 0 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
A : List[Any] = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
A : Any = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
A : Optional[int] = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    \'google_bleu\': google_bleu score\n\nExamples:\n    Example 1:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...    \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...    \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...    \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...    \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...    \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.44\n\n    Example 2:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...    \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...    \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...    \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...    \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...    \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...    \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...    \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...    \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results["google_bleu"], 2))\n        0.61\n\n    Example 3:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...    \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...    \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...    \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...    \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...    \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...    \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...    \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...    \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n        >>> print(round(results["google_bleu"], 2))\n        0.53\n\n    Example 4:\n        >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n        ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n        ...    \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n        >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n        ...    \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n        ...    \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n        >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n        ...    \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n        ...    \'heed\', \'the\', \'cat\', \'commands\']\n        >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n        ...    \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n        ...    \'of\', \'the\', \'cat\']\n\n        >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n        ...    \'interested\', \'in\', \'world\', \'history\']\n        >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n        ...    \'because\', \'he\', \'read\', \'the\', \'book\']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric("google_bleu")\n        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n        >>> print(round(results["google_bleu"], 2))\n        0.4\n'
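# Added illustration (not part of the original metric): the description above
# defines GLEU as min(n-gram recall, n-gram precision). Below is a sentence-level
# sketch of exactly that definition; the metric class further down instead
# delegates to nltk.translate.gleu_score.corpus_gleu, which aggregates over a
# whole corpus.
from collections import Counter


def _sketch_sentence_gleu(reference, hypothesis, min_len=1, max_len=4):
    def ngram_counts(tokens, n):
        return Counter(tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1))

    matches = total_ref = total_hyp = 0
    for n in range(min_len, max_len + 1):
        ref_counts = ngram_counts(reference, n)
        hyp_counts = ngram_counts(hypothesis, n)
        matches += sum((ref_counts & hyp_counts).values())  # clipped n-gram overlap
        total_ref += sum(ref_counts.values())
        total_hyp += sum(hyp_counts.values())
    recall = matches / total_ref if total_ref else 0.0
    precision = matches / total_hyp if total_hyp else 0.0
    return min(recall, precision)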
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A( datasets.Metric ):
def SCREAMING_SNAKE_CASE_ ( self ) -> MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , )
    def SCREAMING_SNAKE_CASE_ ( self , predictions , references , min_len = 1 , max_len = 4 , ) -> Dict[str, float]:
        '''simple docstring'''
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
| 6 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_git"""] = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
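# Usage note (illustrative, added for clarity): with the lazy-module pattern above,
# importing a config-only symbol is cheap, while the first access to a torch-backed
# class triggers the actual import of modeling_git, e.g.:
#
#   from transformers.models.git import GitConfig        # no torch import needed
#   from transformers.models.git import GitForCausalLM   # lazily imports modeling_git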
| 265 | 0 |
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A ( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10
    def snake_case__ ( self : Dict,**kwargs : Union[str, Any] )-> Any:
        '''simple docstring'''
        config = {
            'num_train_timesteps': 1_1_0_0,
            'beta_start': 0.0_001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
        }
        config.update(**kwargs )
return config
def snake_case__ ( self : List[str] )-> Union[str, Any]:
'''simple docstring'''
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )
def snake_case__ ( self : Any )-> List[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001],[0.0_002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start,beta_end=beta_end )
def snake_case__ ( self : Union[str, Any] )-> Optional[int]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
def snake_case__ ( self : Union[str, Any] )-> Tuple:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
def snake_case__ ( self : Union[str, Any] )-> Optional[Any]:
'''simple docstring'''
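        # Standard diffusion sampling loop (exercised below and in the other tests):
        # scale the model input for the current timestep/sigma, predict the residual
        # with the model, then step the scheduler to obtain the previous, less-noisy
        # sample.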
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction' )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample,t )
            model_output = model(sample,t )
            output = scheduler.step(model_output,t,sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1_112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_428_650_170_972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0_002 ) < 1E-3
def snake_case__ ( self : Tuple )-> Tuple:
'''simple docstring'''
if torch_device == "mps":
return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample,t )
            model_output = model(sample,t )
            output = scheduler.step(model_output,t,sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
def snake_case__ ( self : Optional[Any] )-> Any:
'''simple docstring'''
if torch_device == "mps":
return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps,device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample,t )
            model_output = model(sample,t )
            output = scheduler.step(model_output,t,sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if str(torch_device ).startswith('cpu' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
| 7 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp( self ) -> List[Any]:
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
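        # Note (added for clarity): the vocab and merges written above form a tiny
        # deterministic BPE tokenizer; each merges entry "a b" is a BPE merge rule
        # combining the adjacent symbols "a" and "b", applied in priority order.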
    def get_tokenizer( self , **kwargs ) -> Optional[Any]:
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ) -> int:
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ) -> Optional[int]:
        input_text = """lower newer"""
        output_text = """lower newer"""
return input_text, output_text
def _lowercase( self ) -> Optional[Any]:
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = """lower newer"""
        bpe_tokens = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def _lowercase( self ) -> Union[str, Any]:
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=True ) , [0, 31414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=True ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def _lowercase( self ) -> Optional[int]:
        tokenizer = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            """sequence builders""" , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            """sequence builders""" , """multi-sequence build""" , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : str = self.get_tokenizer()
UpperCAmelCase : List[Any] = """Encode this sequence."""
UpperCAmelCase : List[str] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
UpperCAmelCase : Union[str, Any] = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A )
UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A , A )
UpperCAmelCase : Tuple = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A )
UpperCAmelCase : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A , A )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
UpperCAmelCase : int = tokenizer.encode(A , add_special_tokens=A )
UpperCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A , A )
# Testing spaces after special tokens
UpperCAmelCase : Union[str, Any] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(A , lstrip=A , rstrip=A )} ) # mask token has a left space
UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A )
UpperCAmelCase : Union[str, Any] = """Encode <mask> sequence"""
UpperCAmelCase : Union[str, Any] = """Encode <mask>sequence"""
UpperCAmelCase : Union[str, Any] = tokenizer.encode(A )
UpperCAmelCase : Union[str, Any] = encoded.index(A )
UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A , A )
UpperCAmelCase : Tuple = tokenizer.encode(A )
UpperCAmelCase : Optional[int] = encoded.index(A )
UpperCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A , A )
def _lowercase( self ) -> Optional[int]:
pass
def _lowercase( self ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : int = self.tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : Dict = """A, <mask> AllenNLP sentence."""
UpperCAmelCase : Dict = tokenizer_r.encode_plus(A , add_special_tokens=A , return_token_type_ids=A )
UpperCAmelCase : Tuple = tokenizer_p.encode_plus(A , add_special_tokens=A , return_token_type_ids=A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
UpperCAmelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
UpperCAmelCase : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def _lowercase( self ) -> List[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , A )
self.assertEqual(post_processor_state["""add_prefix_space"""] , A )
self.assertEqual(post_processor_state["""trim_offsets"""] , A )
def _lowercase( self ) -> Optional[Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
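        # Background (added; not part of the original test): with trim_offsets=True
        # the offset mapping of a token excludes the leading space (the "Ġ" marker),
        # while add_prefix_space=True prepends a space so the first word is tokenized
        # the same way as words in the middle of a sentence.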
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCAmelCase : int = f'''{text_of_1_token} {text_of_1_token}'''
UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , )
UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : Dict = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , )
UpperCAmelCase : int = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : List[Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , )
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , )
UpperCAmelCase : Optional[Any] = f''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )) , )
UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : Union[str, Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : Optional[int] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
| 265 | 0 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class snake_case_ ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp( self : str ) ->Tuple:
        super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_input_output_texts( self : Optional[Any] , tokenizer : List[Any] ) ->List[str]:
        input_text = '''こんにちは、世界。 \nこんばんは、世界。'''
        output_text = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
        return input_text, output_text
    def get_clean_sequence( self : Optional[Any] , tokenizer : Dict ) ->Tuple:
        input_text, output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
return text, ids
def snake_case__( self : Any ) ->Dict:
pass # TODO add if relevant
def snake_case__( self : Optional[Any] ) ->Optional[Any]:
pass # TODO add if relevant
def snake_case__( self : Optional[Any] ) ->Any:
pass # TODO add if relevant
def snake_case__( self : Optional[int] ) ->int:
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
        self.assertListEqual(tokens , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
def snake_case__( self : Dict ) ->Any:
snake_case_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' )
self.assertIsNotNone(_UpperCamelCase )
snake_case_ = '''こんにちは、世界。\nこんばんは、世界。'''
snake_case_ = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
snake_case_ = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(_UpperCamelCase , '''wb''' ) as handle:
pickle.dump(_UpperCamelCase , _UpperCamelCase )
with open(_UpperCamelCase , '''rb''' ) as handle:
snake_case_ = pickle.load(_UpperCamelCase )
snake_case_ = tokenizer_new.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : List[Any] ) ->Tuple:
snake_case_ = MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def snake_case__( self : int ) ->List[Any]:
try:
snake_case_ = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def snake_case__( self : Union[str, Any] ) ->str:
try:
snake_case_ = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def snake_case__( self : List[str] ) ->Dict:
snake_case_ = MecabTokenizer(do_lower_case=_UpperCamelCase , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def snake_case__( self : Optional[int] ) ->List[str]:
try:
snake_case_ = MecabTokenizer(
do_lower_case=_UpperCamelCase , normalize_text=_UpperCamelCase , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
            # if the dictionary doesn't exist on the system, the call above raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def snake_case__( self : Optional[int] ) ->Union[str, Any]:
snake_case_ = MecabTokenizer(normalize_text=_UpperCamelCase , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def snake_case__( self : Optional[Any] ) ->str:
snake_case_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(_UpperCamelCase )
snake_case_ = '''こんにちは、世界。\nこんばんは、世界。'''
snake_case_ = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
snake_case_ = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(_UpperCamelCase , '''wb''' ) as handle:
pickle.dump(_UpperCamelCase , _UpperCamelCase )
with open(_UpperCamelCase , '''rb''' ) as handle:
snake_case_ = pickle.load(_UpperCamelCase )
snake_case_ = tokenizer_new.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
@require_sudachi
def snake_case__( self : Tuple ) ->Optional[int]:
snake_case_ = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def snake_case__( self : str ) ->Tuple:
snake_case_ = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def snake_case__( self : Dict ) ->List[Any]:
snake_case_ = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )
@require_sudachi
def snake_case__( self : Optional[int] ) ->Tuple:
snake_case_ = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )
@require_sudachi
def snake_case__( self : Optional[Any] ) ->int:
snake_case_ = SudachiTokenizer(do_lower_case=_UpperCamelCase , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def snake_case__( self : Dict ) ->List[str]:
snake_case_ = SudachiTokenizer(normalize_text=_UpperCamelCase , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def snake_case__( self : List[str] ) ->List[Any]:
snake_case_ = SudachiTokenizer(trim_whitespace=_UpperCamelCase , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def snake_case__( self : int ) ->Union[str, Any]:
snake_case_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(_UpperCamelCase )
snake_case_ = '''こんにちは、世界。\nこんばんは、世界。'''
snake_case_ = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
snake_case_ = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(_UpperCamelCase , '''wb''' ) as handle:
pickle.dump(_UpperCamelCase , _UpperCamelCase )
with open(_UpperCamelCase , '''rb''' ) as handle:
snake_case_ = pickle.load(_UpperCamelCase )
snake_case_ = tokenizer_new.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
@require_jumanpp
def snake_case__( self : List[str] ) ->Dict:
snake_case_ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def snake_case__( self : Any ) ->Any:
snake_case_ = JumanppTokenizer(do_lower_case=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def snake_case__( self : int ) ->Dict:
snake_case_ = JumanppTokenizer(normalize_text=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def snake_case__( self : int ) ->Optional[Any]:
snake_case_ = JumanppTokenizer(trim_whitespace=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def snake_case__( self : Any ) ->Optional[int]:
snake_case_ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def snake_case__( self : Any ) ->List[Any]:
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='''[UNK]''' )
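        # WordPiece greedily matches the longest prefix present in the vocab and
        # marks word-internal continuation pieces with "##"; any span that cannot be
        # matched becomes [UNK], as the assertions below demonstrate.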
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def snake_case__( self : Optional[Any] ) ->Optional[int]:
snake_case_ = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
snake_case_ = tokenizer.subword_tokenizer
snake_case_ = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(_UpperCamelCase , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
snake_case_ = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(_UpperCamelCase , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def snake_case__( self : str ) ->Tuple:
snake_case_ = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
snake_case_ = tokenizer.encode('''ありがとう。''' , add_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.encode('''どういたしまして。''' , add_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase )
snake_case_ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class snake_case_ ( __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = BertJapaneseTokenizer
SCREAMING_SNAKE_CASE : int = False
def snake_case__( self : List[str] ) ->int:
super().setUp()
snake_case_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def snake_case__( self : Optional[Any] , **_UpperCamelCase : Union[str, Any] ) ->int:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **_UpperCamelCase )
def snake_case__( self : Any , _UpperCamelCase : Union[str, Any] ) ->List[Any]:
snake_case_ = '''こんにちは、世界。 \nこんばんは、世界。'''
snake_case_ = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def snake_case__( self : Dict ) ->Union[str, Any]:
pass # TODO add if relevant
def snake_case__( self : Any ) ->Union[str, Any]:
pass # TODO add if relevant
def snake_case__( self : Tuple ) ->Tuple:
pass # TODO add if relevant
def snake_case__( self : List[Any] ) ->int:
snake_case_ = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' )
snake_case_ = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
self.assertListEqual(
_UpperCamelCase , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )
def snake_case__( self : List[str] ) ->List[str]:
snake_case_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
snake_case_ = {}
for i, token in enumerate(_UpperCamelCase ):
snake_case_ = i
snake_case_ = CharacterTokenizer(vocab=_UpperCamelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
def snake_case__( self : Dict ) ->Tuple:
snake_case_ = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
snake_case_ = tokenizer.encode('''ありがとう。''' , add_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.encode('''どういたしまして。''' , add_special_tokens=_UpperCamelCase )
snake_case_ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase )
snake_case_ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : str ) ->int:
snake_case_ = '''cl-tohoku/bert-base-japanese'''
snake_case_ = AutoTokenizer.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Optional[int] ) ->Dict:
snake_case_ = '''cl-tohoku/bert-base-japanese'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertTokenizer.from_pretrained(_UpperCamelCase )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
snake_case_ = '''bert-base-cased'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertJapaneseTokenizer.from_pretrained(_UpperCamelCase )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) ) | 8 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
a : Tuple = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class UpperCamelCase_ ( unittest.TestCase ):
lowercase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowercase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowercase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowercase = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def _lowercase( self , A , A , A ) -> Dict:
UpperCAmelCase : Union[str, Any] = ZeroShotClassificationPipeline(
model=A , tokenizer=A , candidate_labels=["""politics""", """health"""] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def _lowercase( self , A , A ) -> Optional[int]:
UpperCAmelCase : Dict = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" )
self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} )
# No kwarg
UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , ["""politics"""] )
self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} )
UpperCAmelCase : str = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] )
self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} )
UpperCAmelCase : List[Any] = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" )
self.assertEqual(
A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] )
self.assertEqual(
A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
UpperCAmelCase : Dict = classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" )
self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} )
# https://github.com/huggingface/transformers/issues/13846
UpperCAmelCase : str = classifier(["""I am happy"""] , ["""positive""", """negative"""] )
self.assertEqual(
A , [
{"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]}
for i in range(1 )
] , )
UpperCAmelCase : List[str] = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] )
self.assertEqual(
A , [
{"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]}
for i in range(2 )
] , )
with self.assertRaises(A ):
classifier("""""" , candidate_labels="""politics""" )
with self.assertRaises(A ):
classifier(A , candidate_labels="""politics""" )
with self.assertRaises(A ):
classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" )
with self.assertRaises(A ):
classifier("""Who are you voting for in 2020?""" , candidate_labels=A )
with self.assertRaises(A ):
classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , )
with self.assertRaises(A ):
classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=A , )
self.run_entailment_id(A )
def _lowercase( self , A ) -> Any:
UpperCAmelCase : Tuple = zero_shot_classifier.model.config
UpperCAmelCase : Union[str, Any] = config.labelaid
UpperCAmelCase : Tuple = zero_shot_classifier.entailment_id
UpperCAmelCase : Any = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
UpperCAmelCase : Optional[Any] = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
UpperCAmelCase : Any = {"""ENTAIL""": 0, """NON-ENTAIL""": 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
UpperCAmelCase : List[str] = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
UpperCAmelCase : Tuple = original_labelaid
self.assertEqual(A , zero_shot_classifier.entailment_id )
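# The assertions above pin down the lookup rule: entailment_id is the id of the first
# label whose lowercased name starts with "entail"; with purely positional labels such
# as LABEL_0/1/2 it falls back to -1, i.e. the last logit.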
@require_torch
def _lowercase( self ) -> str:
UpperCAmelCase : int = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"""Who are you voting for in 2020?""" * 100 , candidate_labels=["""politics""", """public health""", """science"""] )
@require_torch
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
UpperCAmelCase : Union[str, Any] = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , )
UpperCAmelCase : List[Any] = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Optional[int] = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" )
UpperCAmelCase : Optional[int] = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
UpperCAmelCase : str = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def _lowercase( self ) -> List[str]:
UpperCAmelCase : int = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" )
UpperCAmelCase : Tuple = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
UpperCAmelCase : Any = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
| 265 | 0 |
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaPhonemeCTCTokenizer
SCREAMING_SNAKE_CASE__ : Tuple = False
def __magic_name__( self :Any ) -> Optional[Any]:
super().setUp()
__SCREAMING_SNAKE_CASE : Dict = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''}
__SCREAMING_SNAKE_CASE : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + '''\n''' )
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any]=False , lowerCAmelCase__ :List[str]=20 , lowerCAmelCase__ :str=5 ) -> Tuple[str, list]:
__SCREAMING_SNAKE_CASE : Dict = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCAmelCase__ )) for i in range(len(lowerCAmelCase__ ) )]
__SCREAMING_SNAKE_CASE : Tuple = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , do_phonemize=lowerCAmelCase__ ) , lowerCAmelCase__ ) )
if max_length is not None and len(lowerCAmelCase__ ) > max_length:
__SCREAMING_SNAKE_CASE : int = toks[:max_length]
if min_length is not None and len(lowerCAmelCase__ ) < min_length and len(lowerCAmelCase__ ) > 0:
while len(lowerCAmelCase__ ) < min_length:
__SCREAMING_SNAKE_CASE : Optional[Any] = toks + toks
# toks_str = [t[1] for t in toks]
__SCREAMING_SNAKE_CASE : List[Any] = [t[0] for t in toks]
# Ensure consistency
__SCREAMING_SNAKE_CASE : str = tokenizer.decode(lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ )
if " " not in output_txt and len(lowerCAmelCase__ ) > 1:
__SCREAMING_SNAKE_CASE : Optional[int] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCAmelCase__ )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCAmelCase__ )
)
if with_prefix_space:
__SCREAMING_SNAKE_CASE : int = ''' ''' + output_txt
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
return output_txt, output_ids
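# The helper above builds a round-trippable sample: it keeps only ids whose decoded
# form re-encodes to the same single id, so encode/decode stays stable in later tests.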
def __magic_name__( self :Optional[int] , **lowerCAmelCase__ :Dict ) -> Dict:
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
# check adding a single token
tokenizer.add_tokens('''xxx''' )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer('''m xxx ɪ''' , do_phonemize=lowerCAmelCase__ ).input_ids
self.assertEqual(lowerCAmelCase__ , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer('''m aaa ɪ ccc''' , do_phonemize=lowerCAmelCase__ ).input_ids
self.assertEqual(lowerCAmelCase__ , [13, 393, 17, 395] ) # aaa gets the id right after xxx; ccc lands two ids after aaa
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer('''maɪ c''' , do_phonemize=lowerCAmelCase__ ).input_ids
self.assertEqual(lowerCAmelCase__ , [3, 200] ) # mai should be <unk> (=3)
def __magic_name__( self :Any ) -> str:
__SCREAMING_SNAKE_CASE : Any = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
__SCREAMING_SNAKE_CASE : Optional[int] = '''Hello how are you'''
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.phonemize(lowerCAmelCase__ , phonemizer_lang='''en-us''' )
self.assertEqual(lowerCAmelCase__ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
def __magic_name__( self :Optional[int] ) -> int:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
__SCREAMING_SNAKE_CASE : Optional[int] = '''Hello how are you'''
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.phonemize(lowerCAmelCase__ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(lowerCAmelCase__ ).input_ids , tokenizer(lowerCAmelCase__ , do_phonemize=lowerCAmelCase__ ).input_ids )
def __magic_name__( self :Optional[int] ) -> Dict:
__SCREAMING_SNAKE_CASE : Any = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
__SCREAMING_SNAKE_CASE : Dict = '''Hello how are you'''
__SCREAMING_SNAKE_CASE : Any = tokenizer.phonemize(lowerCAmelCase__ , phonemizer_lang='''en-us''' )
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.decode(tokenizer(lowerCAmelCase__ ).input_ids )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :Any ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
__SCREAMING_SNAKE_CASE : List[str] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.decode(sample_ids[0] )
__SCREAMING_SNAKE_CASE : int = tokenizer.batch_decode(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , batch_tokens[0] )
self.assertEqual(lowerCAmelCase__ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
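# decode() performs CTC-style grouping: runs of duplicate ids are merged and pad tokens
# dropped, which is why both 15s in sample_ids[0] survive only because a pad id separates them.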
def __magic_name__( self :int ) -> Any:
__SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
__SCREAMING_SNAKE_CASE : List[str] = '''Hello how are you'''
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.phonemize(lowerCAmelCase__ , phonemizer_lang='''en-us''' )
self.assertEqual(lowerCAmelCase__ , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )
def __magic_name__( self :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
__SCREAMING_SNAKE_CASE : List[str] = '''Hello how are you'''
__SCREAMING_SNAKE_CASE : str = tokenizer.phonemize(lowerCAmelCase__ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(lowerCAmelCase__ ).input_ids , tokenizer(lowerCAmelCase__ , do_phonemize=lowerCAmelCase__ ).input_ids )
def __magic_name__( self :str ) -> Any:
__SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
__SCREAMING_SNAKE_CASE : int = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.decode(sample_ids[0] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.batch_decode(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , batch_tokens[0] )
self.assertEqual(lowerCAmelCase__ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
# decode with no word_del_token filter
__SCREAMING_SNAKE_CASE : Any = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = tokenizer.batch_decode(lowerCAmelCase__ , filter_word_delimiter_token=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , batch_tokens[0] )
self.assertEqual(lowerCAmelCase__ , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] )
def __magic_name__( self :Any ) -> Dict:
__SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
__SCREAMING_SNAKE_CASE : Any = '''Hello how are you'''
__SCREAMING_SNAKE_CASE : Dict = tokenizer.phonemize(lowerCAmelCase__ , phonemizer_lang='''en-us''' )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(tokenizer(lowerCAmelCase__ ).input_ids , filter_word_delimiter_token=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :Union[str, Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
__SCREAMING_SNAKE_CASE : List[Any] = '''Hello how are you'''
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.phonemize(lowerCAmelCase__ , phonemizer_lang='''en-us''' )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(tokenizer(lowerCAmelCase__ ).input_ids , filter_word_delimiter_token=lowerCAmelCase__ )
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = '''Hello how are you'''
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(lowerCAmelCase__ , phonemizer_lang='''en-us''' ).input_ids
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(lowerCAmelCase__ , phonemizer_lang='''fr-fr''' ).input_ids
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.decode(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.decode(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
self.assertEqual(lowerCAmelCase__ , '''ɛ l o h aʊ a ʁ j u''' )
def __magic_name__( self :Any ) -> int:
__SCREAMING_SNAKE_CASE : Any = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
__SCREAMING_SNAKE_CASE : str = '''Hello how Are you'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''hello how are you'''
__SCREAMING_SNAKE_CASE : Dict = tokenizer(lowerCAmelCase__ ).input_ids
__SCREAMING_SNAKE_CASE : Tuple = tokenizer(lowerCAmelCase__ ).input_ids
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :List[str] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
tokenizer.add_tokens(['''!''', '''?'''] )
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} )
# fmt: off
__SCREAMING_SNAKE_CASE : Optional[int] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] )
@staticmethod
def __magic_name__( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : str = [d[key] for d in offsets]
return retrieved_list
def __magic_name__( self :int ) -> str:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer(word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
__SCREAMING_SNAKE_CASE : Any = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
__SCREAMING_SNAKE_CASE : Any = tokenizer.decode(lowerCAmelCase__ , output_char_offsets=lowerCAmelCase__ , filter_word_delimiter_token=lowerCAmelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''char_offsets''' in outputs )
self.assertTrue(isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def __magic_name__( self :Optional[Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer(word_delimiter_token='''|''' )
def check_list_tuples_equal(lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Any ):
self.assertTrue(isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertTrue(isinstance(outputs_list[0] , lowerCAmelCase__ ) )
# transform list to ModelOutput
__SCREAMING_SNAKE_CASE : List[str] = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] )
def recursive_check(lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] ):
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
[recursive_check(la , lb ) for la, lb in zip(lowerCAmelCase__ , lowerCAmelCase__ )]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] )
# fmt: off
__SCREAMING_SNAKE_CASE : Tuple = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we check now is that
# the output type is correct and that the output is identical to `decode`'s.
# char
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(lowerCAmelCase__ , output_char_offsets=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = [tokenizer.decode(lowerCAmelCase__ , output_char_offsets=lowerCAmelCase__ ) for ids in sample_ids]
check_list_tuples_equal(lowerCAmelCase__ , lowerCAmelCase__ )
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
def __magic_name__( self :Tuple ) -> Any:
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
def __magic_name__( self :int ) -> str:
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
def __magic_name__( self :Any ) -> Any:
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
def __magic_name__( self :Dict ) -> str:
pass
def __magic_name__( self :Optional[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__SCREAMING_SNAKE_CASE : str = tokenizer.vocab_size
__SCREAMING_SNAKE_CASE : Any = len(lowerCAmelCase__ )
self.assertNotEqual(lowerCAmelCase__ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__SCREAMING_SNAKE_CASE : Optional[int] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
__SCREAMING_SNAKE_CASE : Dict = tokenizer.add_tokens(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = tokenizer.vocab_size
__SCREAMING_SNAKE_CASE : Any = len(lowerCAmelCase__ )
self.assertNotEqual(lowerCAmelCase__ , 0 )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , len(lowerCAmelCase__ ) )
self.assertEqual(lowerCAmelCase__ , all_size + len(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : str = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=lowerCAmelCase__ )
self.assertGreaterEqual(len(lowerCAmelCase__ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__SCREAMING_SNAKE_CASE : str = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.add_special_tokens(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = tokenizer.vocab_size
__SCREAMING_SNAKE_CASE : Dict = len(lowerCAmelCase__ )
self.assertNotEqual(lowerCAmelCase__ , 0 )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , len(lowerCAmelCase__ ) )
self.assertEqual(lowerCAmelCase__ , all_size_a + len(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=lowerCAmelCase__ )
self.assertGreaterEqual(len(lowerCAmelCase__ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def __magic_name__( self :str ) -> Tuple:
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def __magic_name__( self :Tuple ) -> str:
pass
def __magic_name__( self :Dict ) -> Any:
# The default common tokenizer tests assume that the output of `convert_tokens_to_string` is a string,
# which is not the case for Wav2Vec2PhonemeCTCTokenizer.
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizers(fast=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__SCREAMING_SNAKE_CASE : int = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_tokens_to_string(lowerCAmelCase__ )
self.assertIsInstance(output['''text'''] , lowerCAmelCase__ )
| 9 |
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
a : List[Any] = """__DUMMY_TRANSFORMERS_USER__"""
a : Tuple = """Dummy User"""
a : Optional[Any] = """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"""
a : Optional[Any] = """https://hub-ci.huggingface.co"""
a : List[Any] = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
a : Tuple = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}"""
a : str = Path("""~/.huggingface/hub_ci_token""").expanduser()
@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> Optional[int]:
monkeypatch.setattr(
"""huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , _lowercase )
@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> List[str]:
monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , _lowercase )
monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , _lowercase )
@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> Any:
monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , _lowercase )
@pytest.fixture
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[Any]:
HfFolder.save_token(_lowercase )
yield
HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( ) -> str:
return HfApi(endpoint=_lowercase )
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Union[str, Any]:
UpperCAmelCase : str = HfFolder.get_token()
HfFolder.save_token(_lowercase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(_lowercase )
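# Session-scoped token swap: the CI token is installed for the whole test session and
# the developer's previous token, if any, is restored on teardown.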
@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> Any:
def _cleanup_repo(_lowercase ):
hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" )
return _cleanup_repo
@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> List[str]:
@contextmanager
def _temporary_repo(_lowercase ):
try:
yield repo_id
finally:
cleanup_repo(_lowercase )
return _temporary_repo
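# Illustrative use of the factory fixture above (test and repo names are made up):
#
#   def test_upload(temporary_repo, hf_api, hf_token):
#       with temporary_repo(f"{CI_HUB_USER}/tmp-repo") as repo_id:
#           ...  # the repo is deleted again when the block exits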
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]:
UpperCAmelCase : str = F'''repo_txt_data-{int(time.time() * 10e3 )}'''
UpperCAmelCase : List[Any] = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase )
hf_api.upload_file(
token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data/text_data.txt""" , repo_id=_lowercase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]:
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]:
UpperCAmelCase : Optional[int] = F'''repo_zipped_txt_data-{int(time.time() * 10e3 )}'''
UpperCAmelCase : Optional[int] = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase )
hf_api.upload_file(
token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data.zip""" , repo_id=_lowercase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]:
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Tuple:
UpperCAmelCase : List[Any] = F'''repo_zipped_img_data-{int(time.time() * 10e3 )}'''
UpperCAmelCase : List[str] = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase )
hf_api.upload_file(
token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data.zip""" , repo_id=_lowercase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]:
return hf_private_dataset_repo_zipped_img_data_
| 265 | 0 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def lowerCAmelCase_ ( __a ) -> Dict:
"""simple docstring"""
lowerCamelCase__: str =[
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(__a , __a )
def lowerCAmelCase_ ( __a ) -> List[str]:
"""simple docstring"""
lowerCamelCase__ , lowerCamelCase__: Tuple =emb.weight.shape
lowerCamelCase__: Tuple =nn.Linear(__a , __a , bias=__a )
lowerCamelCase__: int =emb.weight.data
return lin_layer
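# Minimal sketch of the weight-tying trick above (toy sizes, purely illustrative):
# an nn.Embedding's (vocab, dim) weight matrix doubles as a bias-free nn.Linear
# mapping dim -> vocab, so input embedding and output projection share parameters.
#
#   _emb = nn.Embedding(10, 4)
#   _proj = nn.Linear(4, 10, bias=False)
#   _proj.weight.data = _emb.weight.data   # shapes line up: (10, 4)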
def lowerCAmelCase_ ( __a , __a=None ) -> Tuple:
"""simple docstring"""
lowerCamelCase__: Optional[Any] ={}
for old_key in state_dict.keys():
lowerCamelCase__: List[Any] =old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
lowerCamelCase__: int =key.replace("moe_layer.experts.0" , F"""ffn.experts.expert_{expert_idx}""" )
else:
lowerCamelCase__: Optional[Any] =key.replace("moe_layer.experts." , "ffn.experts.expert_" )
if "gate" in key:
lowerCamelCase__: int =key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
if "fc2" and "experts" not in key:
lowerCamelCase__: Tuple =key.replace(".fc2." , ".ffn.fc2." )
if "fc1" and "experts" not in key:
lowerCamelCase__: Tuple =key.replace(".fc1." , ".ffn.fc1." )
if ".encoder_attn." in key:
lowerCamelCase__: int =key.replace(".encoder_attn." , ".cross_attention." )
if "encoder_attn_layer_norm" in key:
lowerCamelCase__: Optional[int] =key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
if "final_layer_norm" in key:
lowerCamelCase__: List[str] =key.replace("final_layer_norm" , "ff_layer_norm" )
lowerCamelCase__: List[str] =state_dict[old_key]
return new_dict
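# Example of the renaming above on a hypothetical fairseq key, with expert_idx=7:
#   "layers.3.moe_layer.experts.0.fc1.weight" -> "layers.3.ffn.experts.expert_7.fc1.weight"
# (the fc1/fc2 rewrites do not fire here because the key already contains "experts")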
def lowerCAmelCase_ ( __a , __a , __a , __a , __a = WEIGHTS_NAME ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: List[str] =[]
lowerCamelCase__: Any =0
os.makedirs(__a , exist_ok=__a )
for expert in range(__a ):
lowerCamelCase__: Any =switch_checkpoint_path + F"""-rank-{expert}.pt"""
if os.path.isfile(__a ):
lowerCamelCase__: Union[str, Any] =torch.load(__a )["model"]
remove_ignore_keys_(__a )
lowerCamelCase__: int =rename_fairseq_keys(__a , __a )
lowerCamelCase__: Any =os.path.join(
__a , weights_name.replace(".bin" , F"""-{len(__a )+1:05d}-of-???.bin""" ) )
torch.save(__a , __a )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__a )[0]].dtype )
# Add the last block
lowerCamelCase__: Optional[int] =os.path.join(__a , weights_name.replace(".bin" , F"""-{len(__a )+1:05d}-of-???.bin""" ) )
lowerCamelCase__: str =torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
remove_ignore_keys_(__a )
lowerCamelCase__: Any =rename_fairseq_keys(__a , __a )
lowerCamelCase__: Optional[Any] =shared_weights["decoder.embed_tokens.weight"]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved in the same file)
if len(__a ) == 1:
lowerCamelCase__: Optional[int] =os.path.join(__a , __a )
torch.save(__a , __a )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__a , __a )
# Otherwise, let's build the index
lowerCamelCase__: Dict ={}
for idx, shard in enumerate(__a ):
lowerCamelCase__: str =weights_name.replace(".bin" , F"""-{idx+1:05d}-of-{len(__a ):05d}.bin""" )
lowerCamelCase__: Optional[Any] =os.path.join(__a , weights_name.replace(".bin" , F"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(__a , os.path.join(__a , __a ) )
for key in shard:
lowerCamelCase__: List[str] =shard_file
# Add the metadata
lowerCamelCase__: List[str] ={"total_size": total_size}
lowerCamelCase__: List[Any] ={"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(__a , __a ) , "w" , encoding="utf-8" ) as f:
lowerCamelCase__: str =json.dumps(__a , indent=2 , sort_keys=__a ) + "\n"
f.write(__a )
return metadata, index
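# Shape of the index the sharded branch above writes (file names and sizes illustrative):
#   {
#     "metadata": {"total_size": 123456789},
#     "weight_map": {"ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00002.bin", ...}
#   }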
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__A = parser.parse_args()
__A , __A = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
__A = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
__A = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 10 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(__magic_name__ )
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , **A ) -> List[str]:
super().__init__(**A )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , A , **A ) -> Optional[Any]:
return super().__call__(A , **A )
def _lowercase( self , **A ) -> Optional[Any]:
UpperCAmelCase : List[Any] = {}
if "candidate_labels" in kwargs:
UpperCAmelCase : Dict = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
UpperCAmelCase : Optional[Any] = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def _lowercase( self , A , A=None , A="This is a photo of {}." ) -> Optional[Any]:
UpperCAmelCase : int = load_image(A )
UpperCAmelCase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
UpperCAmelCase : List[str] = candidate_labels
UpperCAmelCase : Tuple = [hypothesis_template.format(A ) for x in candidate_labels]
UpperCAmelCase : Union[str, Any] = self.tokenizer(A , return_tensors=self.framework , padding=A )
UpperCAmelCase : Union[str, Any] = [text_inputs]
return inputs
def _lowercase( self , A ) -> Optional[int]:
UpperCAmelCase : List[Any] = model_inputs.pop("""candidate_labels""" )
UpperCAmelCase : Optional[Any] = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , A ):
UpperCAmelCase : Optional[Any] = text_inputs[0]
else:
# Batching case.
UpperCAmelCase : Any = text_inputs[0][0]
UpperCAmelCase : Dict = self.model(**A , **A )
UpperCAmelCase : List[Any] = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
def _lowercase( self , A ) -> Union[str, Any]:
UpperCAmelCase : int = model_outputs.pop("""candidate_labels""" )
UpperCAmelCase : int = model_outputs["""logits"""][0]
if self.framework == "pt":
UpperCAmelCase : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 )
UpperCAmelCase : Any = probs.tolist()
if not isinstance(A , A ):
UpperCAmelCase : Any = [scores]
elif self.framework == "tf":
UpperCAmelCase : List[str] = stable_softmax(A , axis=-1 )
UpperCAmelCase : Union[str, Any] = probs.numpy().tolist()
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
UpperCAmelCase : Any = [
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(A , A ) , key=lambda A : -A[0] )
]
return result
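# Illustrative usage of this pipeline (the checkpoint name is an assumption; any
# CLIP-style model with an image processor and tokenizer should work):
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   detector("cat.png", candidate_labels=["cat", "dog"], hypothesis_template="This is a photo of {}.")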
| 265 | 0 |
lowerCAmelCase__ = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
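# Pattern repeated throughout this file: if an optional backend is missing, the real
# objects are swapped for dummies from .utils.dummy_* that raise an informative
# ImportError only when actually used, so `import diffusers` itself never fails.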
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 11 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __lowerCamelCase ( _lowercase ) -> Optional[Any]:
return getitem, k
def __lowerCamelCase ( _lowercase , _lowercase ) -> List[str]:
return setitem, k, v
def __lowerCamelCase ( _lowercase ) -> int:
return delitem, k
def __lowerCamelCase ( _lowercase , _lowercase , *_lowercase ) -> Optional[Any]:
try:
return fun(_lowercase , *_lowercase ), None
except Exception as e:
return None, e
a : List[str] = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
a : List[Any] = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
a : int = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
a : List[Any] = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
a : Tuple = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
a : Optional[Any] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def test_hash_map_is_the_same_as_dict( operations ) -> Optional[int]:
my = HashMap(initial_block_size=4 )
py = {}
for _, (fun, *args) in enumerate(operations ):
my_res , my_exc = _run_operation(my , fun , *args )
py_res , py_exc = _run_operation(py , fun , *args )
assert my_res == py_res
assert str(my ) == str(py )
assert set(my ) == set(py )
assert len(my ) == len(py )
assert set(my.items() ) == set(py.items() )
def test_no_new_methods( ) -> List[Any]:
def is_public(name ) -> bool:
return not name.startswith("""_""" )
dict_public_names = {name for name in dir({} ) if is_public(name )}
hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
assert dict_public_names > hash_public_names
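# A minimal illustration (not part of the test suite) of how one operation list drives both
# maps through _run_operation, which is what the parametrized test above automates:
#   my, py = HashMap(initial_block_size=4), {}
#   for fun, *args in _add_items:
#       assert _run_operation(my, fun, *args) == _run_operation(py, fun, *args)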
| 265 | 0 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
def __init__( self , parent , vocab_size=99 , batch_size=13 , d_model=16 , decoder_seq_length=7 , is_training=True , is_decoder=True , use_attention_mask=True , use_cache=False , use_labels=True , decoder_start_token_id=2 , decoder_ffn_dim=32 , decoder_layers=4 , decoder_attention_heads=4 , max_position_embeddings=30 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.d_model = d_model
self.hidden_size = d_model
self.decoder_layers = decoder_layers
self.num_hidden_layers = decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_attention_heads = decoder_attention_heads
self.num_attention_heads = decoder_attention_heads
self.eos_token_id = eos_token_id
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.use_cache = use_cache
self.max_position_embeddings = max_position_embeddings
self.scope = scope
self.decoder_key_length = decoder_seq_length
self.base_model_out_len = 2
self.decoder_attention_idx = 1
def prepare_config_and_inputs( self: int ):
input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
config = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def create_and_check_decoder_model_past( self: List[Any] , config: Optional[int] , input_ids: Optional[int] , attention_mask: Optional[Any] , lm_labels: Dict , ):
config.use_cache = True
model = TrOCRDecoder(config=config ).to(torch_device ).eval()
input_ids = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
outputs = model(input_ids , use_cache=True )
outputs_use_cache_conf = model(input_ids )
outputs_no_past = model(input_ids , use_cache=False )
self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
past_key_values = outputs["""past_key_values"""]
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
output_from_no_past = model(next_input_ids )["""last_hidden_state"""]
output_from_past = model(next_tokens , past_key_values=past_key_values )["""last_hidden_state"""]
# select random slice
random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 )
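# (Explanatory note, not in the original test) This is the standard KV-cache equivalence
# check: decoding the appended token with past_key_values must match, at the same position,
# a full forward pass over the concatenated sequence, within the atol=1e-3 tolerance above.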
def prepare_config_and_inputs_for_common( self: Union[str, Any] ):
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , attention_mask , lm_labels = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase):
all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
pipeline_model_mapping = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
fx_compatible = True
test_pruning = False
def setUp( self: str ):
self.model_tester = TrOCRStandaloneDecoderModelTester(self , is_training=False )
self.config_tester = ConfigTester(self , config_class=TrOCRConfig )
def lowerCAmelCase__ ( self: str ):
pass
def lowerCAmelCase__ ( self: List[str] ):
pass
def lowerCAmelCase__ ( self: Union[str, Any] ):
pass
def lowerCAmelCase__ ( self: List[str] ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self: Dict ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*config_and_inputs )
def lowerCAmelCase__ ( self: str ):
return
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase__ ( self: Union[str, Any] ):
pass
| 12 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 2_5_0_0_0_4
RO_CODE = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
tokenizer_class = MBartTokenizer
rust_tokenizer_class = MBartTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = MBartTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase( self ) -> int:
tokenizer = MBartTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokens = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
ids = tokenizer.convert_tokens_to_ids(tokens )
self.assertListEqual(
ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
back_tokens = tokenizer.convert_ids_to_tokens(ids )
self.assertListEqual(
back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def _lowercase( self ) -> Union[str, Any]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
self.tokenizers_list = [(self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})]
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : str = self.tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase : Dict = tokenizer_r.save_pretrained(A )
UpperCAmelCase : int = tokenizer_p.save_pretrained(A )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
UpperCAmelCase : int = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
UpperCAmelCase : Optional[int] = tokenizer_r.from_pretrained(A )
UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase : Any = tokenizer_r.save_pretrained(A , legacy_format=A )
UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(A )
# Checks it save with the same files
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
UpperCAmelCase : List[str] = tokenizer_r.from_pretrained(A )
UpperCAmelCase : Any = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase : Optional[Any] = tempfile.mkdtemp()
UpperCAmelCase : Optional[Any] = tokenizer_r.save_pretrained(A , legacy_format=A )
UpperCAmelCase : List[str] = tokenizer_p.save_pretrained(A )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(A )
UpperCAmelCase : str = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( unittest.TestCase ):
checkpoint_name = 'facebook/mbart-large-en-ro'
src_text = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
tgt_text = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
expected_src_tokens = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def setUpClass( cls ) -> Tuple:
cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
cls.pad_token_id = 1
return cls
def _lowercase( self ) -> Union[str, Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 )
def _lowercase( self ) -> Union[str, Any]:
ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , ids )
def _lowercase( self ) -> List[str]:
self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
self.assertEqual(result , expected_romanian )
self.assertNotIn(self.tokenizer.eos_token , result )
def _lowercase( self ) -> List[Any]:
src_text = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , str )
desired_max_length = 10
ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , EN_CODE )
self.assertEqual(len(ids ) , desired_max_length )
def _lowercase( self ) -> Tuple:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250026, 250001] )
def _lowercase( self ) -> Dict:
tmpdirname = tempfile.mkdtemp()
original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(tmpdirname )
new_tok = MBartTokenizer.from_pretrained(tmpdirname )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def _lowercase( self ) -> List[str]:
batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors="""pt""" )
batch["""decoder_input_ids"""] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def _lowercase( self ) -> Union[str, Any]:
batch = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
batch["""decoder_input_ids"""] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(batch , BatchEncoding )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
result = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , result )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def _lowercase( self ) -> List[str]:
batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="""pt""" )
targets = self.tokenizer(
text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors="""pt""" )
labels = targets["""input_ids"""]
batch["""decoder_input_ids"""] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _lowercase( self ) -> Union[str, Any]:
inputs = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(inputs ) , {
# A, test, EOS, en_XX
"""input_ids""": [[62, 3034, 2, 250004]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250001,
} , )
| 265 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_vision_encoder_decoder"""] = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_tf_vision_encoder_decoder"""] = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_flax_vision_encoder_decoder"""] = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
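# (Explanatory note) With the _LazyModule pattern above, importing the package is cheap:
# entries in _import_structure are only imported on first attribute access, so e.g.
#   from transformers.models.vision_encoder_decoder import VisionEncoderDecoderModel
# triggers the torch-gated import at that point rather than at package import time.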
| 13 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
a : List[str] = get_tests_dir("""fixtures""")
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> int:
# A mock response for an HTTP head request to emulate server down
response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = HTTPError
response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
image_processor = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=response_mock ) as mock_head:
image_processor = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# This check we did call the fake head request
mock_head.assert_called()
def _lowercase( self ) -> Any:
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def _lowercase( self ) -> Union[str, Any]:
with self.assertRaises(OSError ):
# config is in subfolder, the following should not work without specifying the subfolder
config = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
config = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
self.assertIsNotNone(config )
@is_staging_test
class UpperCamelCase_ ( unittest.TestCase ):
@classmethod
def setUpClass( cls ) -> Dict:
cls._token = TOKEN
HfFolder.save_token(TOKEN )
@classmethod
def tearDownClass( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A , repo_id="""test-image-processor""" , push_to_hub=A , use_auth_token=self._token )
UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : List[str] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=A , use_auth_token=self._token )
UpperCAmelCase : int = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
def _lowercase( self ) -> Optional[int]:
CustomImageProcessor.register_for_auto_class()
UpperCAmelCase : Optional[Any] = CustomImageProcessor.from_pretrained(A )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(
f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=A )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
| 265 | 0 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
class BartphoTokenizerTest( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
tokenizer_class = BartphoTokenizer
test_rust_tokenizer = False
test_sentencepiece = True
def setUp( self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
super().setUp()
vocab = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
vocab_tokens = dict(zip(vocab , range(len(vocab))))
self.special_tokens_map = {'''unk_token''': '''<unk>'''}
self.monolingual_vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''])
with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''') as fp:
for token in vocab_tokens:
fp.write(f"""{token} {vocab_tokens[token]}\n""")
tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map)
tokenizer.save_pretrained(self.tmpdirname)
def get_tokenizer( self : Optional[int] , **kwargs : Dict) ->Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return BartphoTokenizer.from_pretrained(self.tmpdirname , **kwargs)
def get_input_output_texts( self : Union[str, Any] , tokenizer : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
input_text = '''This is a là test'''
output_text = '''This is a<unk><unk> test'''
return input_text, output_text
def test_full_tokenizer( self : Tuple) ->int:
'''simple docstring'''
tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map)
text = '''This is a là test'''
bpe_tokens = '''▁This ▁is ▁a ▁l à ▁t est'''.split()
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens , bpe_tokens)
input_tokens = tokens + [tokenizer.unk_token]
input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens) , input_bpe_tokens)
| 14 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor( state ) -> Tuple:
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
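# Illustrative values: with 2 processes, rank 0 holds tensor([1., 2.]) and rank 1 holds
# tensor([3., 4.]); gather() then yields [1., 2., 3., 4.] on every rank, reduce("sum")
# gives [4., 6.] and reduce("mean") gives [2., 3.], matching the assertions below.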
def test_gather( state ) -> List[Any]:
tensor = create_tensor(state )
gathered_tensor = gather(tensor )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def test_gather_object( state ) -> Optional[int]:
obj = [state.process_index]
gathered_obj = gather_object(obj )
assert len(gathered_obj ) == state.num_processes, F'''{gathered_obj}, {len(gathered_obj )} != {state.num_processes}'''
assert gathered_obj == list(range(state.num_processes ) ), F'''{gathered_obj} != {list(range(state.num_processes ) )}'''
def test_broadcast( state ) -> List[Any]:
tensor = create_tensor(state )
broadcasted_tensor = broadcast(tensor )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def test_pad_across_processes( state ) -> Tuple:
# We need to pad the tensor with one more element if we are the main process
# to ensure that we can pad
if state.is_main_process:
tensor = torch.arange(state.num_processes + 1 ).to(state.device )
else:
tensor = torch.arange(state.num_processes ).to(state.device )
padded_tensor = pad_across_processes(tensor )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def test_reduce_sum( state ) -> Dict:
# For now runs on only two processes
if state.num_processes != 2:
return
tensor = create_tensor(state )
reduced_tensor = reduce(tensor , """sum""" )
truth_tensor = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(reduced_tensor , truth_tensor ), F'''{reduced_tensor} != {truth_tensor}'''
def test_reduce_mean( state ) -> Optional[Any]:
# For now runs on only two processes
if state.num_processes != 2:
return
tensor = create_tensor(state )
reduced_tensor = reduce(tensor , """mean""" )
truth_tensor = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(reduced_tensor , truth_tensor ), F'''{reduced_tensor} != {truth_tensor}'''
def _mp_fn( index ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
def main( ) -> int:
state = PartialState()
state.print(F'''State: {state}''' )
state.print("""testing gather""" )
test_gather(state )
state.print("""testing gather_object""" )
test_gather_object(state )
state.print("""testing broadcast""" )
test_broadcast(state )
state.print("""testing pad_across_processes""" )
test_pad_across_processes(state )
state.print("""testing reduce_sum""" )
test_reduce_sum(state )
state.print("""testing reduce_mean""" )
test_reduce_mean(state )
if __name__ == "__main__":
main()
| 265 | 0 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
SCREAMING_SNAKE_CASE :str = logging.get_logger(__name__)
def copy_layers( src_layers , dest_layers , layers_to_copy ) -> None:
"""simple docstring"""
layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(dest_layers ) == len(layers_to_copy ), F'''{len(dest_layers )} != {len(layers_to_copy )}'''
dest_layers.load_state_dict(layers_to_copy.state_dict() )
SCREAMING_SNAKE_CASE :Tuple = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
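# For example, LAYERS_TO_COPY[12][3] == [0, 6, 11]: a 3-layer student of a 12-layer
# teacher copies the teacher's first, middle and last layers.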
SCREAMING_SNAKE_CASE :List[Any] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy( n_student , n_teacher ) -> List[int]:
"""simple docstring"""
try:
val = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F'''no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'''
F''' {n_student}''' )
return list(range(n_student ) )
def get_layers_to_supervise( n_student , n_teacher ) -> List[int]:
"""simple docstring"""
if n_student > n_teacher:
raise ValueError(F'''Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}''' )
elif n_teacher == n_student:
return list(range(n_teacher ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
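# For example, get_layers_to_supervise(n_student=3, n_teacher=12) == [3, 7, 11]: each of
# the student's 3 layers is supervised by a hardcoded teacher layer from LAYERS_TO_SUPERVISE.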
def create_student_by_copying_alternating_layers( teacher , save_path = "student" , e = None , d = None , copy_first_teacher_layers=False , e_layers_to_copy=None , d_layers_to_copy=None , **extra_config_kwargs , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
"""simple docstring"""
_msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(teacher , str ):
AutoTokenizer.from_pretrained(teacher ).save_pretrained(save_path ) # purely for convenience
teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher ).eval()
else:
assert isinstance(teacher , PreTrainedModel ), F'''teacher must be a model or string got type {type(teacher )}'''
init_kwargs = teacher.config.to_diff_dict()
try:
teacher_e , teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
e = teacher_e
if d is None:
d = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
teacher_e , teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
teacher_e , teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
e = teacher_e
if d is None:
d = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(extra_config_kwargs )
# Copy weights
student_cfg = teacher.config_class(**init_kwargs )
student = AutoModelForSeq2SeqLM.from_config(student_cfg )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
info = student.load_state_dict(teacher.state_dict() , strict=False )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
e_layers_to_copy , d_layers_to_copy = list(range(e ) ), list(range(d ) )
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'''
F''' {save_path}''' )
student.save_pretrained(save_path )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
e_layers_to_copy = pick_layers_to_copy(e , teacher_e )
if d_layers_to_copy is None:
d_layers_to_copy = pick_layers_to_copy(d , teacher_d )
try:
if hasattr(
teacher , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , e_layers_to_copy )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , d_layers_to_copy )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , e_layers_to_copy )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , d_layers_to_copy )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , e_layers_to_copy )
copy_layers(teacher.decoder.block , student.decoder.block , d_layers_to_copy )
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' )
student.config.init_metadata = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(save_path )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
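# Illustrative invocation (the module filename make_student.py is an assumption, not shown
# in this snippet): fire maps CLI arguments onto the function's parameters, e.g.
#   python make_student.py facebook/bart-large-cnn --save_path student_dir --e 12 --d 3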
| 15 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
a : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase_ ( DiffusionPipeline ):
def __init__( self , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ) -> Optional[Any]:
super().__init__()
self.register_modules(
vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
def enable_attention_slicing( self , slice_size = "auto" ) -> List[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(slice_size )
def disable_attention_slicing( self ) -> Dict:
self.enable_attention_slicing(None )
@torch.no_grad()
def __call__( self , A , A = 512 , A = 512 , A = 50 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , A = None , **A , ) -> List[Any]:
if isinstance(A , A ):
UpperCAmelCase : List[str] = 1
elif isinstance(A , A ):
UpperCAmelCase : Dict = len(A )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(A )}.''' )
# get prompt text embeddings
UpperCAmelCase : List[str] = self.tokenizer(
A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
UpperCAmelCase : List[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCAmelCase : int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCAmelCase : Tuple = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
UpperCAmelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = text_embeddings.shape
UpperCAmelCase : List[str] = text_embeddings.repeat(1 , A , 1 )
UpperCAmelCase : List[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCAmelCase : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase : List[str]
if negative_prompt is None:
UpperCAmelCase : Any = [""""""]
elif type(A ) is not type(A ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !='''
f''' {type(A )}.''' )
elif isinstance(A , A ):
UpperCAmelCase : Optional[int] = [negative_prompt]
elif batch_size != len(A ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
UpperCAmelCase : Any = negative_prompt
UpperCAmelCase : Dict = text_input_ids.shape[-1]
UpperCAmelCase : List[Any] = self.tokenizer(
A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , )
UpperCAmelCase : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase : int = uncond_embeddings.shape[1]
UpperCAmelCase : List[Any] = uncond_embeddings.repeat(A , A , 1 )
UpperCAmelCase : List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCAmelCase : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCAmelCase : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
UpperCAmelCase : str = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCAmelCase : Dict = torch.randn(
A , generator=A , device="""cpu""" , dtype=A ).to(self.device )
UpperCAmelCase : int = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to(
self.device )
else:
UpperCAmelCase : int = torch.randn(
A , generator=A , device=self.device , dtype=A )
UpperCAmelCase : int = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
UpperCAmelCase : Optional[Any] = latents_reference.to(self.device )
UpperCAmelCase : Tuple = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
UpperCAmelCase : int = (latents_shape[3] - latents_shape_reference[3]) // 2
UpperCAmelCase : List[str] = (latents_shape[2] - latents_shape_reference[2]) // 2
UpperCAmelCase : Union[str, Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
UpperCAmelCase : Union[str, Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
UpperCAmelCase : Optional[int] = 0 if dx < 0 else dx
UpperCAmelCase : List[str] = 0 if dy < 0 else dy
UpperCAmelCase : Union[str, Any] = max(-dx , 0 )
UpperCAmelCase : List[Any] = max(-dy , 0 )
# import pdb
# pdb.set_trace()
UpperCAmelCase : str = latents_reference[:, :, dy : dy + h, dx : dx + w]
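# (Explanatory note) dx/dy center the smaller latent grid inside the larger reference grid,
# so the same-seed reference noise is cropped or offset rather than regenerated when the
# requested image size changes.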
# set timesteps
self.scheduler.set_timesteps(A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCAmelCase : Union[str, Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase : Optional[int] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase : int = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase : Optional[Any] = {}
if accepts_eta:
UpperCAmelCase : List[str] = eta
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase : str = self.scheduler.scale_model_input(A , A )
# predict the noise residual
UpperCAmelCase : Any = self.unet(A , A , encoder_hidden_states=A ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCAmelCase , UpperCAmelCase : Any = noise_pred.chunk(2 )
UpperCAmelCase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
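# i.e. noise_pred = eps_uncond + w * (eps_text - eps_uncond): classifier-free guidance with
# w > 1 extrapolates away from the unconditional prediction toward the text-conditioned one.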
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase : Dict = self.scheduler.step(A , A , A , **A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A , A )
UpperCAmelCase : Union[str, Any] = 1 / 0.1_8_2_1_5 * latents
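# 0.18215 is the Stable Diffusion VAE latent scaling factor; dividing by it undoes the
# scaling applied at encode time before the latents are decoded back to image space.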
UpperCAmelCase : Tuple = self.vae.decode(A ).sample
UpperCAmelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
UpperCAmelCase : int = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to(
self.device )
UpperCAmelCase , UpperCAmelCase : int = self.safety_checker(
images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
UpperCAmelCase : Any = None
if output_type == "pil":
UpperCAmelCase : int = self.numpy_to_pil(A )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
| 265 | 0 |
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class __A ( ModelTesterMixin ,unittest.TestCase ):
'''simple docstring'''
model_class = PriorTransformer
main_input_name = "hidden_states"
@property
def dummy_input( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
batch_size = 4
embedding_dim = 8
num_embeddings = 7
hidden_states = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
proj_embedding = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def get_dummy_seed_input( self : str ,seed : str=0 ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(seed )
batch_size = 4
embedding_dim = 8
num_embeddings = 7
hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def input_shape( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return (4, 8)
@property
def output_shape( self : Any ) -> List[str]:
"""simple docstring"""
return (4, 8)
def prepare_init_args_and_inputs_for_common( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
init_dict = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 4,
'''num_layers''': 2,
'''embedding_dim''': 8,
'''num_embeddings''': 7,
'''additional_embeddings''': 4,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_from_pretrained_hub( self : int ) -> int:
"""simple docstring"""
model , loading_info = PriorTransformer.from_pretrained(
'''hf-internal-testing/prior-dummy''' ,output_loading_info=True )
self.assertIsNotNone(model )
self.assertEqual(len(loading_info['''missing_keys'''] ) ,0 )
model.to(torch_device )
hidden_states = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def test_forward_signature( self : Optional[int] ) -> Dict:
"""simple docstring"""
init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
model = self.model_class(**init_dict )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['''hidden_states''', '''timestep''']
self.assertListEqual(arg_names[:2] ,expected_arg_names )
def test_output_pretrained( self : Any ) -> Union[str, Any]:
"""simple docstring"""
model = PriorTransformer.from_pretrained('''hf-internal-testing/prior-dummy''' )
model = model.to(torch_device )
if hasattr(model ,'''set_default_attn_processor''' ):
model.set_default_attn_processor()
input = self.get_dummy_seed_input()
with torch.no_grad():
output = model(**input )[0]
output_slice = output[0, :5].flatten().cpu()
print(output_slice )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] )
self.assertTrue(torch_all_close(output_slice ,expected_output_slice ,rtol=1e-2 ) )
@slow
class __A ( unittest.TestCase ):
'''simple docstring'''
def get_dummy_seed_input( self : Dict ,batch_size : List[str]=1 ,embedding_dim : Tuple=768 ,num_embeddings : List[Any]=77 ,seed : Optional[Any]=0 ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(seed )
batch_size = batch_size
embedding_dim = embedding_dim
num_embeddings = num_embeddings
hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def tearDown( self : int ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
] )
def test_kandinsky_prior( self : Optional[Any] ,seed : Optional[Any] ,expected_slice : Optional[Any] ) -> int:
"""simple docstring"""
model = PriorTransformer.from_pretrained('''kandinsky-community/kandinsky-2-1-prior''' ,subfolder='''prior''' )
model.to(torch_device )
input = self.get_dummy_seed_input(seed=seed )
with torch.no_grad():
sample = model(**input )[0]
assert list(sample.shape ) == [1, 768]
output_slice = sample[0, :8].flatten().cpu()
print(output_slice )
expected_output_slice = torch.tensor(expected_slice )
assert torch_all_close(output_slice ,expected_output_slice ,atol=1e-3 )
| 16 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester :
def __init__( self , parent , batch_size=12 , seq_length=7 , is_training=True , use_input_mask=True , use_labels=True , vocab_size=99 , hidden_size=32 , projection_dim=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , dropout=0.1 , attention_dropout=0.1 , max_position_embeddings=512 , initializer_range=0.0_2 , bos_token_id=0 , scope=None , ) -> Any:
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = scope
self.bos_token_id = bos_token_id
def _lowercase( self ) -> Tuple:
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
input_mask = input_mask.numpy()
batch_size , seq_length = input_mask.shape
rnd_start_indices = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(rnd_start_indices ):
input_mask[batch_idx, :start_index] = 1
input_mask[batch_idx, start_index:] = 0
config = self.get_config()
return config, input_ids, tf.convert_to_tensor(input_mask )
def _lowercase( self ) -> int:
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _lowercase( self , config , input_ids , input_mask ) -> Union[str, Any]:
model = TFBlipTextModel(config=config )
result = model(input_ids , attention_mask=input_mask , training=False )
result = model(input_ids , training=False )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowercase( self ) -> Optional[int]:
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , input_mask = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( TFModelTesterMixin , unittest.TestCase ):
lowercase = (TFBlipTextModel,) if is_tf_available() else ()
lowercase = False
lowercase = False
lowercase = False
def _lowercase( self ) -> int:
self.model_tester = BlipTextModelTester(self )
self.config_tester = ConfigTester(self , config_class=BlipTextConfig , hidden_size=37 )
def _lowercase( self ) -> Tuple:
self.config_tester.run_common_tests()
def _lowercase( self ) -> List[Any]:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def _lowercase( self ) -> List[str]:
pass
def _lowercase( self ) -> Optional[int]:
pass
@unittest.skip(reason="""Blip does not use inputs_embeds""" )
def _lowercase( self ) -> Union[str, Any]:
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def _lowercase( self ) -> Optional[int]:
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def _lowercase( self ) -> Dict:
pass
@slow
def _lowercase( self ) -> Dict:
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFBlipTextModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def _lowercase( self , A=True ) -> str:
super().test_pt_tf_model_equivalence(allow_missing_keys=A )
| 265 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}
class _lowerCAmelCase ( PretrainedConfig ):
"""simple docstring"""
model_type = "rwkv"
attribute_map = {"max_position_embeddings": "context_length"}
def __init__( self, vocab_size=5_0_2_7_7, context_length=1_0_2_4, hidden_size=4_0_9_6, num_hidden_layers=3_2, attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1E-5, bos_token_id=0, eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs, ):
self.vocab_size = vocab_size
self.context_length = context_length
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
self.layer_norm_epsilon = layer_norm_epsilon
self.rescale_every = rescale_every
self.use_cache = use_cache
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
super().__init__(
tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
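# A minimal usage sketch (argument values are illustrative, not from the original
# file): the defaulting logic above means attention_hidden_size falls back to
# hidden_size and intermediate_size to 4 * hidden_size when left as None.
if __name__ == "__main__":
    config = _lowerCAmelCase(vocab_size=1_000, context_length=256, hidden_size=128, num_hidden_layers=2)
    print(config.attention_hidden_size)  # -> 128 (falls back to hidden_size)
    print(config.intermediate_size)  # -> 512 (falls back to 4 * hidden_size)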
| 17 |
'''simple docstring'''
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
a : str = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
a : int = """main"""
# Default branch name
a : Any = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"""
# One particular commit (not the top of `main`)
a : str = """aaaaaaa"""
# This commit does not exist, so we should 404.
a : int = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"""
# Sha-1 of config.json on the top of `main`, for checking purposes
a : Any = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"""
@contextlib.contextmanager
def context_en( ) -> List[str]:
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def context_fr( ) -> Optional[int]:
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> List[Any]:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class UpperCamelCase_ ( unittest.TestCase ):
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def _lowercase( self , A ) -> Tuple:
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def _lowercase( self , A ) -> Dict:
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def _lowercase( self , A ) -> Union[str, Any]:
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
def _lowercase( self ) -> Optional[int]:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class UpperCamelCase_ ( __magic_name__ ):
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_tf
def _lowercase( self ) -> int:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class UpperCamelCase_ ( __magic_name__ ):
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_flax
def _lowercase( self ) -> Any:
# Flax models don't have labels
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
class UpperCamelCase_ ( __magic_name__ ):
pass
self.assertEqual(find_labels(A ) , [] )
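# A small runnable sketch of what the tests above verify: ContextManagers enters
# the given managers in order and exits them in reverse, like nested `with` blocks.
if __name__ == "__main__":
    with ContextManagers([context_fr(), context_en()]):
        print("Transformers are awesome!")
    # prints: Bonjour! / Welcome! / Transformers are awesome! / Bye! / Au revoir!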
| 265 | 0 |
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    """
    Resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C)).
    """
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
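# Worked example with illustrative component values: L = 10 mH and C = 100 nF
# give f = 1 / (2 * pi * sqrt(1e-2 * 1e-7)) ≈ 5032.9 Hz.
if __name__ == "__main__":
    print(resonant_frequency(10e-3, 100e-9))  # ('Resonant frequency', 5032.9...)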
| 18 |
'''simple docstring'''
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """
    Returns the least row length n for which the number of ways to fill a row
    with red blocks of length >= min_block_length (separated by at least one
    black square) first exceeds one million.
    """
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n


if __name__ == "__main__":
    print(F'''{solution() = }''')
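# Sanity check against the values stated in Project Euler 115: F(3, 29) = 673135
# and F(3, 30) = 1261624, so the count first exceeds one million at n = 30.
if __name__ == "__main__":
    assert solution(3) == 30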
| 265 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A ={
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
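# Sketch of the lazy-import behaviour this module sets up (illustrative): importing
# the package is cheap because _LazyModule only records the names listed in
# _import_structure; the first attribute access, e.g.
#     from transformers.models.whisper import WhisperConfig
# triggers the real submodule import. Each try/except block above simply drops a
# group of exports when its optional backend (tokenizers, torch, tf, flax) is missing.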
| 19 |
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign val to every position in [a, b], pushing lazy updates down on demand."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Maximum over positions [a, b]."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
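    # Cross-check against a naive list (illustrative): replay the two range
    # assignments from the demo above and compare a full-range max query.
    naive = list(A)
    naive[0:3] = [111] * 3  # update(1, 1, size, 1, 3, 111) covers positions 1..3
    naive[6:8] = [235] * 2  # update(1, 1, size, 7, 8, 235) covers positions 7..8
    assert segt.query(1, 1, size, 1, size) == max(naive)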
| 265 | 0 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""ctrl""": 256,
}
CONTROL_CODES = {
"""Pregnancy""": 168629,
"""Christianity""": 7675,
"""Explain""": 106423,
"""Fitness""": 63440,
"""Saving""": 63163,
"""Ask""": 27171,
"""Ass""": 95985,
"""Joke""": 163509,
"""Questions""": 45622,
"""Thoughts""": 49605,
"""Retail""": 52342,
"""Feminism""": 164338,
"""Writing""": 11992,
"""Atheism""": 192263,
"""Netflix""": 48616,
"""Computing""": 39639,
"""Opinion""": 43213,
"""Alone""": 44967,
"""Funny""": 58917,
"""Gaming""": 40358,
"""Human""": 4088,
"""India""": 1331,
"""Joker""": 77138,
"""Diet""": 36206,
"""Legal""": 11859,
"""Norman""": 4939,
"""Tip""": 72689,
"""Weight""": 52343,
"""Movies""": 46273,
"""Running""": 23425,
"""Science""": 2090,
"""Horror""": 37793,
"""Confession""": 60572,
"""Finance""": 12250,
"""Politics""": 16360,
"""Scary""": 191985,
"""Support""": 12654,
"""Technologies""": 32516,
"""Teenage""": 66160,
"""Event""": 32769,
"""Learned""": 67460,
"""Notion""": 182770,
"""Wikipedia""": 37583,
"""Books""": 6665,
"""Extract""": 76050,
"""Confessions""": 102701,
"""Conspiracy""": 75932,
"""Links""": 63674,
"""Narcissus""": 150425,
"""Relationship""": 54766,
"""Relationships""": 134796,
"""Reviews""": 41671,
"""News""": 4256,
"""Translation""": 26820,
"""multilingual""": 128406,
}
def get_pairs( word ):
"""Return the set of adjacent symbol pairs in a word, where a word is a tuple of symbols."""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
prev_char = char
pairs = set(pairs )
return pairs
class __snake_case ( PreTrainedTokenizer ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
control_codes = CONTROL_CODES
def __init__( self ,vocab_file ,merges_file ,unk_token="<unk>" ,**kwargs ):
'''simple docstring'''
super().__init__(unk_token=unk_token ,**kwargs )
with open(vocab_file ,encoding="""utf-8""" ) as vocab_handle:
self.encoder = json.load(vocab_handle )
self.decoder = {v: k for k, v in self.encoder.items()}
with open(merges_file ,encoding="""utf-8""" ) as merges_handle:
merges = merges_handle.read().split("""\n""" )[1:-1]
merges = [tuple(merge.split() ) for merge in merges]
self.bpe_ranks = dict(zip(merges ,range(len(merges ) ) ) )
self.cache = {}
@property
def vocab_size( self ):
'''simple docstring'''
return len(self.encoder )
def get_vocab( self ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def bpe( self ,token ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
word = tuple(token )
word = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
pairs = get_pairs(word )
if not pairs:
return token
while True:
bigram = min(pairs ,key=lambda pair : self.bpe_ranks.get(pair ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
first , second = bigram
new_word = []
i = 0
while i < len(word ):
try:
j = word.index(first ,i )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
i = j
if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
new_word = tuple(new_word )
word = new_word
if len(word ) == 1:
break
else:
pairs = get_pairs(word )
word = """@@ """.join(word )
word = word[:-4]
self.cache[token] = word
return word
def _tokenize( self ,text ):
'''simple docstring'''
split_tokens = []
words = re.findall(r"""\S+\n?""" ,text )
for token in words:
split_tokens.extend(list(self.bpe(token ).split(""" """ ) ) )
return split_tokens
def _convert_token_to_id( self ,token ):
'''simple docstring'''
return self.encoder.get(token ,self.encoder.get(self.unk_token ) )
def _convert_id_to_token( self ,index ):
'''simple docstring'''
return self.decoder.get(index ,self.unk_token )
def convert_tokens_to_string( self ,tokens ):
'''simple docstring'''
out_string = """ """.join(tokens ).replace("""@@ """ ,"""""" ).strip()
return out_string
def save_vocabulary( self ,save_directory ,filename_prefix = None ):
'''simple docstring'''
if not os.path.isdir(save_directory ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
vocab_file = os.path.join(
save_directory ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
merge_file = os.path.join(
save_directory ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(vocab_file ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=True ,ensure_ascii=False ) + """\n""" )
index = 0
with open(merge_file ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
index = token_index
writer.write(""" """.join(bpe_tokens ) + """\n""" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
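# Illustration of the BPE helper above (input is hypothetical): a word is a tuple
# of symbols, and get_pairs returns its set of adjacent symbol pairs.
if __name__ == "__main__":
    print(get_pairs(("h", "e", "l", "l", "o</w>")))
    # -> {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')}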
| 20 |
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Shift the brightness of every pixel in img by level."""

    def brightness(c: int) -> float:
        # 128 + level + (c - 128) simplifies to c + level; written this way to
        # mirror the usual "shift around the midpoint" formulation.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
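# Design note: Image.point() maps every 8-bit pixel value through the function
# (building a lookup table and clamping results to the 0..255 range), so an
# equivalent call would be img.point(lambda c: c + level).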
| 265 | 0 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset( IterableDataset ):
def __init__( self, p_stop=0.0_1, max_length=10_00) -> Dict:
"""simple docstring"""
self.p_stop = p_stop
self.max_length = max_length
def __iter__( self) -> str:
"""simple docstring"""
count = 0
stop = False
while not stop and count < self.max_length:
yield count
count += 1
stop = random.random() < self.p_stop
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase=False, lowerCamelCase=True) -> Optional[int]:
"""simple docstring"""
_lowercase : Dict = [
BatchSamplerShard(lowerCamelCase, 2, lowerCamelCase, split_batches=lowerCamelCase, even_batches=lowerCamelCase)
for i in range(2)
]
_lowercase : str = [list(lowerCamelCase) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(lowerCamelCase) for shard in batch_sampler_shards], [len(lowerCamelCase) for e in expected])
self.assertListEqual(lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : List[str] = BatchSampler(range(24), batch_size=3, drop_last=lowerCamelCase)
_lowercase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase)
_lowercase : Union[str, Any] = BatchSampler(range(24), batch_size=3, drop_last=lowerCamelCase)
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase)
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowercase : Union[str, Any] = BatchSampler(range(21), batch_size=3, drop_last=lowerCamelCase)
_lowercase : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase)
_lowercase : Dict = BatchSampler(range(21), batch_size=3, drop_last=lowerCamelCase)
_lowercase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase)
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batches.
_lowercase : Any = BatchSampler(range(22), batch_size=3, drop_last=lowerCamelCase)
_lowercase : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase)
_lowercase : List[str] = BatchSampler(range(22), batch_size=3, drop_last=lowerCamelCase)
_lowercase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase)
# Check the shards when the dataset is not a round multiple of batch size and also does not
# have a multiple of num_processes batches.
_lowercase : Union[str, Any] = BatchSampler(range(20), batch_size=3, drop_last=lowerCamelCase)
_lowercase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase)
_lowercase : Union[str, Any] = BatchSampler(range(20), batch_size=3, drop_last=lowerCamelCase)
_lowercase : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase)
# Check the shards when the dataset is very small.
_lowercase : Union[str, Any] = BatchSampler(range(2), batch_size=3, drop_last=lowerCamelCase)
_lowercase : Dict = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase)
_lowercase : List[str] = BatchSampler(range(2), batch_size=3, drop_last=lowerCamelCase)
_lowercase : Dict = [[], []]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : str = BatchSampler(range(24), batch_size=4, drop_last=lowerCamelCase)
_lowercase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, split_batches=lowerCamelCase)
_lowercase : List[str] = BatchSampler(range(24), batch_size=4, drop_last=lowerCamelCase)
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, split_batches=lowerCamelCase)
# Check the shards when the dataset is not a round multiple of batch size.
_lowercase : Union[str, Any] = BatchSampler(range(22), batch_size=4, drop_last=lowerCamelCase)
_lowercase : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, split_batches=lowerCamelCase)
_lowercase : Optional[Any] = BatchSampler(range(22), batch_size=4, drop_last=lowerCamelCase)
_lowercase : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, split_batches=lowerCamelCase)
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowercase : Any = BatchSampler(range(21), batch_size=4, drop_last=lowerCamelCase)
_lowercase : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, split_batches=lowerCamelCase)
_lowercase : Tuple = BatchSampler(range(21), batch_size=4, drop_last=lowerCamelCase)
_lowercase : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, split_batches=lowerCamelCase)
# Check the shards when the dataset is very small.
_lowercase : Union[str, Any] = BatchSampler(range(2), batch_size=4, drop_last=lowerCamelCase)
_lowercase : int = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, split_batches=lowerCamelCase)
_lowercase : Dict = BatchSampler(range(2), batch_size=4, drop_last=lowerCamelCase)
_lowercase : Dict = [[], []]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, split_batches=lowerCamelCase)
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : List[str] = BatchSampler(range(24), batch_size=3, drop_last=lowerCamelCase)
_lowercase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, even_batches=lowerCamelCase)
_lowercase : Any = BatchSampler(range(24), batch_size=3, drop_last=lowerCamelCase)
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, even_batches=lowerCamelCase)
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowercase : Any = BatchSampler(range(21), batch_size=3, drop_last=lowerCamelCase)
_lowercase : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, even_batches=lowerCamelCase)
_lowercase : Optional[Any] = BatchSampler(range(21), batch_size=3, drop_last=lowerCamelCase)
_lowercase : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, even_batches=lowerCamelCase)
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batches.
_lowercase : str = BatchSampler(range(22), batch_size=3, drop_last=lowerCamelCase)
_lowercase : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, even_batches=lowerCamelCase)
_lowercase : str = BatchSampler(range(22), batch_size=3, drop_last=lowerCamelCase)
_lowercase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, even_batches=lowerCamelCase)
# Check the shards when the dataset is not a round multiple of batch size and also does not
# have a multiple of num_processes batches.
_lowercase : List[Any] = BatchSampler(range(20), batch_size=3, drop_last=lowerCamelCase)
_lowercase : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, even_batches=lowerCamelCase)
_lowercase : Dict = BatchSampler(range(20), batch_size=3, drop_last=lowerCamelCase)
_lowercase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, even_batches=lowerCamelCase)
# Check the shards when the dataset is very small.
_lowercase : Union[str, Any] = BatchSampler(range(2), batch_size=3, drop_last=lowerCamelCase)
_lowercase : int = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, even_batches=lowerCamelCase)
_lowercase : Dict = BatchSampler(range(2), batch_size=3, drop_last=lowerCamelCase)
_lowercase : Optional[int] = [[], []]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, even_batches=lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : str = BatchSampler(range(24), batch_size=4, drop_last=lowerCamelCase)
_lowercase : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, split_batches=lowerCamelCase, even_batches=lowerCamelCase)
_lowercase : Optional[Any] = BatchSampler(range(24), batch_size=4, drop_last=lowerCamelCase)
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, split_batches=lowerCamelCase, even_batches=lowerCamelCase)
# Check the shards when the dataset is not a round multiple of batch size.
_lowercase : int = BatchSampler(range(22), batch_size=4, drop_last=lowerCamelCase)
_lowercase : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, split_batches=lowerCamelCase, even_batches=lowerCamelCase)
_lowercase : List[str] = BatchSampler(range(22), batch_size=4, drop_last=lowerCamelCase)
_lowercase : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, split_batches=lowerCamelCase, even_batches=lowerCamelCase)
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowercase : List[str] = BatchSampler(range(21), batch_size=4, drop_last=lowerCamelCase)
_lowercase : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, split_batches=lowerCamelCase, even_batches=lowerCamelCase)
_lowercase : Any = BatchSampler(range(21), batch_size=4, drop_last=lowerCamelCase)
_lowercase : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, split_batches=lowerCamelCase, even_batches=lowerCamelCase)
# Check the shards when the dataset is very small.
_lowercase : Any = BatchSampler(range(2), batch_size=4, drop_last=lowerCamelCase)
_lowercase : int = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, split_batches=lowerCamelCase, even_batches=lowerCamelCase)
_lowercase : str = BatchSampler(range(2), batch_size=4, drop_last=lowerCamelCase)
_lowercase : Optional[int] = [[], []]
self.check_batch_sampler_shards(lowerCamelCase, lowerCamelCase, split_batches=lowerCamelCase, even_batches=lowerCamelCase)
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Union[str, Any] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_lowercase : List[str] = [BatchSamplerShard(lowerCamelCase, 2, lowerCamelCase, even_batches=lowerCamelCase) for i in range(2)]
self.assertEqual(len(batch_sampler_shards[0]), 3)
self.assertEqual(len(batch_sampler_shards[1]), 2)
self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=False, lowerCamelCase=2, lowerCamelCase=False) -> str:
"""simple docstring"""
random.seed(lowerCamelCase)
_lowercase : List[str] = list(lowerCamelCase)
_lowercase : Optional[int] = [
IterableDatasetShard(
lowerCamelCase, batch_size=lowerCamelCase, drop_last=lowerCamelCase, num_processes=lowerCamelCase, process_index=lowerCamelCase, split_batches=lowerCamelCase, )
for i in range(lowerCamelCase)
]
_lowercase : Dict = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(lowerCamelCase)
iterable_dataset_lists.append(list(lowerCamelCase))
_lowercase : Tuple = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
_lowercase : str = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(lowerCamelCase), len(lowerCamelCase))
self.assertTrue(len(lowerCamelCase) % shard_batch_size == 0)
_lowercase : Tuple = []
for idx in range(0, len(lowerCamelCase), lowerCamelCase):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(lowerCamelCase) < len(lowerCamelCase):
reference += reference
self.assertListEqual(lowerCamelCase, reference[: len(lowerCamelCase)])
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : List[Any] = 42
_lowercase : int = RandomIterableDataset()
self.check_iterable_dataset_shards(lowerCamelCase, lowerCamelCase, batch_size=4, drop_last=lowerCamelCase, split_batches=lowerCamelCase)
self.check_iterable_dataset_shards(lowerCamelCase, lowerCamelCase, batch_size=4, drop_last=lowerCamelCase, split_batches=lowerCamelCase)
self.check_iterable_dataset_shards(lowerCamelCase, lowerCamelCase, batch_size=4, drop_last=lowerCamelCase, split_batches=lowerCamelCase)
self.check_iterable_dataset_shards(lowerCamelCase, lowerCamelCase, batch_size=4, drop_last=lowerCamelCase, split_batches=lowerCamelCase)
# Edge case with a very small dataset
_lowercase : Union[str, Any] = RandomIterableDataset(max_length=2)
self.check_iterable_dataset_shards(lowerCamelCase, lowerCamelCase, batch_size=4, drop_last=lowerCamelCase, split_batches=lowerCamelCase)
self.check_iterable_dataset_shards(lowerCamelCase, lowerCamelCase, batch_size=4, drop_last=lowerCamelCase, split_batches=lowerCamelCase)
self.check_iterable_dataset_shards(lowerCamelCase, lowerCamelCase, batch_size=4, drop_last=lowerCamelCase, split_batches=lowerCamelCase)
self.check_iterable_dataset_shards(lowerCamelCase, lowerCamelCase, batch_size=4, drop_last=lowerCamelCase, split_batches=lowerCamelCase)
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : List[Any] = BatchSampler(range(16), batch_size=4, drop_last=lowerCamelCase)
_lowercase : str = SkipBatchSampler(lowerCamelCase, 2)
self.assertListEqual(list(lowerCamelCase), [[8, 9, 10, 11], [12, 13, 14, 15]])
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Union[str, Any] = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Optional[Any] = DataLoader(list(range(16)), batch_size=4)
_lowercase : Optional[int] = skip_first_batches(lowerCamelCase, num_batches=2)
self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Optional[Any] = DataLoaderShard(list(range(16)), batch_size=4)
for idx, _ in enumerate(lowerCamelCase):
self.assertEqual(dataloader.end_of_dataloader, idx == 3)
# Test it also works on the second iteration
for idx, _ in enumerate(lowerCamelCase):
self.assertEqual(dataloader.end_of_dataloader, idx == 3)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
Accelerator()
_lowercase : Optional[Any] = DataLoaderDispatcher(range(16), batch_size=4)
for idx, _ in enumerate(lowerCamelCase):
self.assertEqual(dataloader.end_of_dataloader, idx == 3)
# Test it also works on the second iteration
for idx, _ in enumerate(lowerCamelCase):
self.assertEqual(dataloader.end_of_dataloader, idx == 3)
| 21 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester :
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=64 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ) -> Optional[int]:
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.pad_token_id = vocab_size - 1
def _lowercase( self ) -> Union[str, Any]:
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_labels = None
if self.use_labels:
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
config = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowercase( self ) -> Optional[Any]:
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase : Any = True
return config, input_ids, input_mask, token_labels
def _lowercase( self , A , A , A ) -> int:
UpperCAmelCase : str = GPTNeoXModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[str] = model(A , attention_mask=A )
UpperCAmelCase : List[str] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A ) -> Optional[int]:
UpperCAmelCase : str = True
UpperCAmelCase : Optional[Any] = GPTNeoXModel(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A ) -> List[str]:
UpperCAmelCase : Tuple = GPTNeoXForCausalLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase : str = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A ) -> Tuple:
UpperCAmelCase : List[str] = self.num_labels
UpperCAmelCase : Any = GPTNeoXForQuestionAnswering(A )
model.to(A )
model.eval()
UpperCAmelCase : str = model(A , attention_mask=A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase( self , A , A , A , A ) -> int:
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : List[str] = GPTNeoXForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase( self , A , A , A , A ) -> str:
UpperCAmelCase : List[Any] = self.num_labels
UpperCAmelCase : Tuple = GPTNeoXForTokenClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase( self , A , A , A ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : str = GPTNeoXForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
UpperCAmelCase : List[str] = model(A , attention_mask=A , use_cache=A )
UpperCAmelCase : List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : Any = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase : Dict = model(A , attention_mask=A , output_hidden_states=A )
UpperCAmelCase : Any = output_from_no_past["""hidden_states"""][0]
UpperCAmelCase : List[str] = model(
A , attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0]
# select random slice
UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase : List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs
UpperCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowercase = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def _lowercase( self ) -> Union[str, Any]:
self.model_tester = GPTNeoXModelTester(self )
self.config_tester = ConfigTester(self , config_class=GPTNeoXConfig , hidden_size=64 , num_attention_heads=8 )
def _lowercase( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(A , A , A )
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(A , A , A )
def _lowercase( self ) -> Optional[Any]:
# This regression test was failing with PyTorch < 1.3
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCAmelCase : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(A , A , A )
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(A , A , A )
def _lowercase( self ) -> int:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*A )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
def _lowercase( self ) -> Any:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def _lowercase( self ) -> Optional[int]:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowercase( self , A ) -> str:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : int = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Dict = GPTNeoXModel(A )
original_model.to(A )
original_model.eval()
UpperCAmelCase : List[str] = original_model(A ).last_hidden_state
UpperCAmelCase : Any = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Any = {"""type""": scaling_type, """factor""": 1_0.0}
UpperCAmelCase : str = GPTNeoXModel(A )
scaled_model.to(A )
scaled_model.eval()
UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state
UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
@slow
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : str = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
UpperCAmelCase : int = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(A )
UpperCAmelCase : List[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(A )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
UpperCAmelCase : List[str] = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
UpperCAmelCase : Union[str, Any] = model.generate(**A , do_sample=A , max_new_tokens=20 )
UpperCAmelCase : Tuple = tokenizer.batch_decode(A )[0]
self.assertEqual(A , A )
| 265 | 0 |
'''simple docstring'''
import logging
from transformers import PretrainedConfig
__SCREAMING_SNAKE_CASE :Tuple = logging.getLogger(__name__)
__SCREAMING_SNAKE_CASE :str = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class A_ ( PretrainedConfig ):
model_type = """bertabs"""
def __init__( self , vocab_size=3_0_5_2_2 , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , **kwargs , ):
super().__init__(**kwargs )
self.vocab_size = vocab_size
self.max_pos = max_pos
self.enc_layers = enc_layers
self.enc_hidden_size = enc_hidden_size
self.enc_heads = enc_heads
self.enc_ff_size = enc_ff_size
self.enc_dropout = enc_dropout
self.dec_layers = dec_layers
self.dec_hidden_size = dec_hidden_size
self.dec_heads = dec_heads
self.dec_ff_size = dec_ff_size
self.dec_dropout = dec_dropout
| 22 |
'''simple docstring'''
def actual_power(a: int, b: int) -> int:
    # Exponentiation by squaring: b is halved on every call, so the recursion
    # depth is O(log |b|).
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        # int(b / 2) truncates toward zero, so actual_power(a, b) returns
        # a ** abs(b) for negative b, and the reciprocal yields a ** b.
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
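# Worked checks (illustrative):
if __name__ == "__main__":
    assert power(2, 10) == 1024
    assert power(-2, -3) == -0.125  # 1 / (-2) ** 3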
| 265 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
"""simple docstring"""
def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
    self.parent = parent
    self.batch_size = batch_size
    self.image_size = image_size
    self.num_channels = num_channels
    self.embeddings_size = embeddings_size
    self.hidden_sizes = hidden_sizes
    self.depths = depths
    self.is_training = is_training
    self.use_labels = use_labels
    self.hidden_act = hidden_act
    self.num_labels = num_labels
    self.scope = scope
    self.num_stages = len(hidden_sizes )
def A ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : List[str] = None
if self.use_labels:
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def A ( self : int ) -> Tuple:
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def A ( self : Optional[int] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : List[Any] ) -> List[Any]:
UpperCAmelCase : int = TFResNetModel(config=__snake_case )
UpperCAmelCase : Tuple = model(__snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A ( self : List[str] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : str ) -> str:
UpperCAmelCase : List[Any] = self.num_labels
UpperCAmelCase : Tuple = TFResNetForImageClassification(__snake_case )
UpperCAmelCase : Tuple = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : str ) -> Tuple:
UpperCAmelCase : Dict = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs
UpperCAmelCase : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class TFResNetModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False
def A ( self : Any ) -> List[Any]:
UpperCAmelCase : List[Any] = TFResNetModelTester(self )
UpperCAmelCase : str = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case )
def A ( self : List[str] ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self : Dict ) -> Dict:
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def A ( self : str ) -> Dict:
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def A ( self : Dict ) -> Any:
pass
def A ( self : Optional[int] ) -> str:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Union[str, Any] = model_class(__snake_case )
UpperCAmelCase : List[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : List[Any] = [*signature.parameters.keys()]
UpperCAmelCase : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def A ( self : Any ) -> List[Any]:
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def A ( self : Dict ) -> str:
def check_hidden_states_output(__snake_case : int , __snake_case : List[Any] , __snake_case : Optional[int] ):
UpperCAmelCase : List[Any] = model_class(__snake_case )
UpperCAmelCase : List[str] = model(**self._prepare_for_class(__snake_case , __snake_case ) )
UpperCAmelCase : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase : List[str] = self.model_tester.num_stages
self.assertEqual(len(__snake_case ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[str] = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase : int = layer_type
UpperCAmelCase : Tuple = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : Optional[Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def A ( self : Tuple ) -> int:
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case )
@slow
def A ( self : str ) -> Optional[Any]:
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Optional[int] = TFResNetModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def prepare_img( ):
UpperCAmelCase : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A ( self : str ) -> int:
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A ( self : Dict ) -> Optional[int]:
UpperCAmelCase : Optional[int] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCAmelCase : List[str] = self.default_image_processor
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : Tuple = image_processor(images=__snake_case , return_tensors='''tf''' )
# forward pass
UpperCAmelCase : Dict = model(**__snake_case )
# verify the logits
UpperCAmelCase : Dict = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __snake_case )
UpperCAmelCase : Any = tf.constant([-11.10_69, -9.78_77, -8.37_77] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __snake_case , atol=1E-4 ) )
| 23 |
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a : Any = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
def _lowercase( self ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = AlbertTokenizer(a )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase( self , A ) -> int:
UpperCAmelCase : Optional[int] = """this is a test"""
UpperCAmelCase : Dict = """this is a test"""
return input_text, output_text
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = """<pad>"""
UpperCAmelCase : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def _lowercase( self ) -> Any:
UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(A ) , 30000 )
def _lowercase( self ) -> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _lowercase( self ) -> Union[str, Any]:
if not self.test_rust_tokenizer:
return
UpperCAmelCase : int = self.get_tokenizer()
UpperCAmelCase : List[str] = self.get_rust_tokenizer()
UpperCAmelCase : Optional[Any] = """I was born in 92000, and this is falsé."""
UpperCAmelCase : str = tokenizer.tokenize(A )
UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(A )
self.assertListEqual(A , A )
UpperCAmelCase : Any = tokenizer.encode(A , add_special_tokens=A )
UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase : Union[str, Any] = tokenizer.encode(A )
UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A )
self.assertListEqual(A , A )
def _lowercase( self ) -> Any:
tokenizer = AlbertTokenizer(a , keep_accents=True )
UpperCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [48, 25, 21, 1289] )
UpperCAmelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(A , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def _lowercase( self ) -> Optional[Any]:
tokenizer = AlbertTokenizer(a )
text = tokenizer.encode("""sequence builders""" )
text_a = tokenizer.encode("""multi-sequence build""" )
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def _lowercase( self ) -> Dict:
# fmt: off
UpperCAmelCase : Tuple = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
| 265 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar('T')
class SegmentTree(Generic[T] ):
    def __init__(self , arr : list[T] , fnc : Callable[[T, T], T] ):
        """Build a segment tree over arr, combining elements with fnc (e.g. min, max, sum)."""
        any_type : Any | T = None
        self.N : int = len(arr )
        self.st : list[T] = [any_type for _ in range(self.N )] + arr  # internal nodes + leaves
        self.fn = fnc
        self.build()
    def build(self ):
        """Fill the internal nodes bottom-up from the leaves."""
        for p in range(self.N - 1 , 0 , -1 ):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def update(self , p : int , v : T ):
        """Set arr[p] = v, then repair all ancestors of that leaf."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def query(self , l : int , r : int ):  # noqa: E741
        """Combine arr[l..r] (inclusive) with fn."""
        l, r = l + self.N, r + self.N
        res : T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r] )
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
test_updates = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
min_segment_tree = SegmentTree(test_array, min)
max_segment_tree = SegmentTree(test_array, max)
sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
def test_all_segments() -> None:
    """Exhaustively compare every range query against functools.reduce."""
    for i in range(len(test_array ) ):
        for j in range(i , len(test_array ) ):
            min_range = reduce(min , test_array[i : j + 1] )
            max_range = reduce(max , test_array[i : j + 1] )
            sum_range = reduce(lambda a, b: a + b , test_array[i : j + 1] )
            assert min_range == min_segment_tree.query(i , j )
            assert max_range == max_segment_tree.query(i , j )
            assert sum_range == sum_segment_tree.query(i , j )
test_all_segments()
for index, value in test_updates.items():
test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 24 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase_ ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
def get_dummy_components( self ) -> Optional[int]:
    torch.manual_seed(0 )
    unet = UNet2DConditionModel(
        block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
    scheduler = DDIMScheduler(
        beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
    inverse_scheduler = DDIMInverseScheduler(
        beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_zero=False , )
    torch.manual_seed(0 )
    vae = AutoencoderKL(
        block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
    torch.manual_seed(0 )
    text_encoder_config = CLIPTextConfig(
        bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
    text_encoder = CLIPTextModel(text_encoder_config )
    tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
    components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def get_dummy_inputs( self , device , seed=0 ) -> Optional[Any]:
    mask = floats_tensor((1, 16, 16) , rng=random.Random(seed ) ).to(device )
    latents = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(seed ) ).to(device )
    if str(device ).startswith("""mps""" ):
        generator = torch.manual_seed(seed )
    else:
        generator = torch.Generator(device=device ).manual_seed(seed )
    inputs = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def get_dummy_mask_inputs( self , device , seed=0 ) -> Optional[int]:
    image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
    image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
    image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" )
    if str(device ).startswith("""mps""" ):
        generator = torch.manual_seed(seed )
    else:
        generator = torch.Generator(device=device ).manual_seed(seed )
    inputs = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def get_dummy_inversion_inputs( self , device , seed=0 ) -> str:
    image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
    image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
    image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" )
    if str(device ).startswith("""mps""" ):
        generator = torch.manual_seed(seed )
    else:
        generator = torch.Generator(device=device ).manual_seed(seed )
    inputs = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def _lowercase( self ) -> List[Any]:
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
UpperCAmelCase : Dict = self.get_dummy_components()
UpperCAmelCase : int = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(A , A , A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
UpperCAmelCase : Any = self.get_dummy_inputs(A )
UpperCAmelCase : Optional[Any] = pipe(**A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(A )
UpperCAmelCase : Dict = self.pipeline_class.from_pretrained(A )
pipe_loaded.to(A )
pipe_loaded.set_progress_bar_config(disable=A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(A , A ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(A )
UpperCAmelCase : Tuple = pipe_loaded(**A )[0]
UpperCAmelCase : Optional[Any] = np.abs(output - output_loaded ).max()
self.assertLess(A , 1e-4 )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Optional[int] = """cpu"""
UpperCAmelCase : Optional[Any] = self.get_dummy_components()
UpperCAmelCase : List[str] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Union[str, Any] = self.get_dummy_mask_inputs(A )
UpperCAmelCase : List[Any] = pipe.generate_mask(**A )
UpperCAmelCase : Dict = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
UpperCAmelCase : Optional[int] = np.array([0] * 9 )
UpperCAmelCase : Union[str, Any] = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = """cpu"""
UpperCAmelCase : List[str] = self.get_dummy_components()
UpperCAmelCase : Optional[Any] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Optional[int] = self.get_dummy_inversion_inputs(A )
UpperCAmelCase : List[str] = pipe.invert(**A ).images
UpperCAmelCase : Union[str, Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase : Dict = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , )
UpperCAmelCase : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1e-3 )
def _lowercase( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def _lowercase( self ) -> int:
UpperCAmelCase : List[Any] = """cpu"""
UpperCAmelCase : int = self.get_dummy_components()
UpperCAmelCase : List[Any] = {"""beta_start""": 0.0_0_0_8_5, """beta_end""": 0.0_1_2, """beta_schedule""": """scaled_linear"""}
UpperCAmelCase : int = DPMSolverMultistepScheduler(**A )
UpperCAmelCase : int = DPMSolverMultistepInverseScheduler(**A )
UpperCAmelCase : List[str] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Tuple = self.get_dummy_inversion_inputs(A )
UpperCAmelCase : Any = pipe.invert(**A ).images
UpperCAmelCase : Dict = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase : Any = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , )
UpperCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1e-3 )
@require_torch_gpu
@slow
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def _lowercase( cls ) -> Dict:
UpperCAmelCase : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
UpperCAmelCase : Union[str, Any] = raw_image.convert("""RGB""" ).resize((768, 768) )
UpperCAmelCase : List[str] = raw_image
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Dict = torch.manual_seed(0 )
UpperCAmelCase : int = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa )
UpperCAmelCase : Optional[int] = DDIMScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Tuple = """a bowl of fruit"""
UpperCAmelCase : List[Any] = """a bowl of pears"""
UpperCAmelCase : str = pipe.generate_mask(
image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , )
UpperCAmelCase : Tuple = pipe.invert(
prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A ).latents
UpperCAmelCase : Any = pipe(
prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
UpperCAmelCase : List[str] = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase : Union[str, Any] = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa )
UpperCAmelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase : Any = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : int = """a bowl of fruit"""
UpperCAmelCase : int = """a bowl of pears"""
UpperCAmelCase : str = pipe.generate_mask(
image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , )
UpperCAmelCase : Any = pipe.invert(
prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A , num_inference_steps=25 , ).latents
UpperCAmelCase : str = pipe(
prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
UpperCAmelCase : Tuple = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
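Both integration tests above run the same three-step DiffEdit recipe; a compact summary (argument names exactly as used in the tests):
# 1. pipe.generate_mask(image=..., source_prompt=..., target_prompt=...)   -> where to edit
# 2. pipe.invert(prompt=source_prompt, image=..., inpaint_strength=0.7).latents  -> inverted latents
# 3. pipe(prompt=target_prompt, mask_image=mask, image_latents=inv_latents, ...)  -> edited image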
| 265 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset ):
"""simple docstring"""
    def __init__(self , params , data ) -> None:
        """simple docstring"""
        self.params = params
        self.token_ids = np.array(data )
        self.lengths = np.array([len(t ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__(self , SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
return (self.token_ids[index], self.lengths[index])
def __len__(self ) -> int:
"""simple docstring"""
return len(self.lengths )
def check(self ) -> None:
"""simple docstring"""
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
    def remove_long_sequences(self ) -> None:
        """Sequences that are too long are split by chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(F'''Splitting {sum(indices )} too long sequences.''' )
        def divide_chunks(l , n ):
            return [l[i : i + n] for i in range(0 , len(l ) , n )]
        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
        else:
            cls_id, sep_id = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
        for seq_, len_ in zip(self.token_ids , self.lengths ):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_ )
                new_lengths.append(len_ )
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_ , max_len - 2 ):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s , 0 , cls_id )
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s , len(sub_s ) , sep_id )
                    assert len(sub_s ) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s )
                new_tok_ids.extend(sub_seqs )
                new_lengths.extend([len(l ) for l in sub_seqs] )
        self.token_ids = np.array(new_tok_ids )
        self.lengths = np.array(new_lengths )
    def remove_empty_sequences(self ) -> None:
        """Too short sequences (<=11 tokens) are simply removed."""
        init_size = len(self )
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
    def remove_unknown_sequences(self ) -> None:
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["""unk_token"""]
        init_size = len(self )
        unk_occs = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def print_statistics(self ) -> None:
"""simple docstring"""
if not self.params.is_master:
return
logger.info(F'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self , batch ):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids ) == len(lengths )
        # Max for paddings
        max_seq_len_ = max(lengths )
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["""pad_token"""]
        else:
            pad_idx = self.params.special_tok_ids["""unk_token"""]
        tk_ = [list(t.astype(int ) ) + [pad_idx] * (max_seq_len_ - len(t )) for t in token_ids]
        assert len(tk_ ) == len(token_ids )
        assert all(len(t ) == max_seq_len_ for t in tk_ )
        tk_t = torch.tensor(tk_ )  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths )  # (bs)
        return tk_t, lg_t
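A self-contained sketch of the same right-padding scheme used in batch_sequences above (pad index and sequences here are illustrative):
import torch

def pad_batch(token_ids, pad_idx=0):
    # Right-pad every sequence to the longest one in the batch.
    max_len = max(len(t) for t in token_ids)
    padded = [list(t) + [pad_idx] * (max_len - len(t)) for t in token_ids]
    lengths = [len(t) for t in token_ids]
    return torch.tensor(padded), torch.tensor(lengths)

tk_t, lg_t = pad_batch([[5, 6, 7], [5, 6]])
assert tk_t.shape == (2, 3) and lg_t.tolist() == [3, 2]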
| 25 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def __lowerCamelCase ( _lowercase ) -> Union[str, Any]:
UpperCAmelCase : Dict = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(_lowercase , _lowercase )
def __lowerCamelCase ( _lowercase ) -> Tuple:
UpperCAmelCase , UpperCAmelCase : List[str] = emb.weight.shape
UpperCAmelCase : Dict = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
UpperCAmelCase : Optional[Any] = emb.weight.data
return lin_layer
def __lowerCamelCase ( _lowercase , _lowercase=None ) -> List[Any]:
UpperCAmelCase : Dict = {}
for old_key in state_dict.keys():
UpperCAmelCase : str = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
UpperCAmelCase : int = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' )
else:
UpperCAmelCase : str = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" )
if "gate" in key:
UpperCAmelCase : str = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" )
if "fc2" and "experts" not in key:
UpperCAmelCase : Tuple = key.replace(""".fc2.""" , """.ffn.fc2.""" )
if "fc1" and "experts" not in key:
UpperCAmelCase : List[Any] = key.replace(""".fc1.""" , """.ffn.fc1.""" )
if ".encoder_attn." in key:
UpperCAmelCase : Union[str, Any] = key.replace(""".encoder_attn.""" , """.cross_attention.""" )
if "encoder_attn_layer_norm" in key:
UpperCAmelCase : str = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" )
if "final_layer_norm" in key:
UpperCAmelCase : Optional[int] = key.replace("""final_layer_norm""" , """ff_layer_norm""" )
UpperCAmelCase : str = state_dict[old_key]
return new_dict
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = WEIGHTS_NAME ) -> Tuple:
UpperCAmelCase : Any = []
UpperCAmelCase : Dict = 0
os.makedirs(_lowercase , exist_ok=_lowercase )
for expert in range(_lowercase ):
UpperCAmelCase : str = switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(_lowercase ):
UpperCAmelCase : Optional[Any] = torch.load(_lowercase )["""model"""]
remove_ignore_keys_(_lowercase )
UpperCAmelCase : List[str] = rename_fairseq_keys(_lowercase , _lowercase )
UpperCAmelCase : Optional[Any] = os.path.join(
_lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase )+1:05d}-of-???.bin''' ) )
torch.save(_lowercase , _lowercase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_lowercase )[0]].dtype )
# Add the last block
UpperCAmelCase : Optional[Any] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase )+1:05d}-of-???.bin''' ) )
UpperCAmelCase : Optional[int] = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
remove_ignore_keys_(_lowercase )
UpperCAmelCase : Optional[Any] = rename_fairseq_keys(_lowercase , _lowercase )
UpperCAmelCase : int = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_lowercase ) == 1:
UpperCAmelCase : Dict = os.path.join(_lowercase , _lowercase )
torch.save(_lowercase , _lowercase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_lowercase , _lowercase )
# Otherwise, let's build the index
UpperCAmelCase : Optional[int] = {}
for idx, shard in enumerate(_lowercase ):
UpperCAmelCase : List[str] = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_lowercase ):05d}.bin''' )
UpperCAmelCase : List[str] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(_lowercase , os.path.join(_lowercase , _lowercase ) )
for key in shard:
UpperCAmelCase : Tuple = shard_file
# Add the metadata
UpperCAmelCase : Any = {"""total_size""": total_size}
UpperCAmelCase : Optional[Any] = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(_lowercase , _lowercase ) , """w""" , encoding="""utf-8""" ) as f:
UpperCAmelCase : str = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + """\n"""
f.write(_lowercase )
return metadata, index
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
a : int = parser.parse_args()
a , a : Any = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_2_8,
args.dtype,
)
a : str = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8
)
config.save_pretrained(args.pytorch_dump_folder_path)
a : Optional[int] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
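For reference, the index JSON written by shard_on_the_fly above has this overall shape (key names and sizes below are illustrative, not taken from a real run):
# {
#   "metadata": {"total_size": 220000000000},
#   "weight_map": {
#     "decoder.layers.0.ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00129.bin",
#     "shared.weight": "pytorch_model-00129-of-00129.bin"
#   }
# }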
| 265 | 0 |
from manim import *
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> Dict:
_A : List[Any] = Rectangle(height=0.5 , width=0.5 )
_A : Optional[int] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_A : Optional[Any] = [mem.copy() for i in range(6 )]
_A : List[Any] = [mem.copy() for i in range(6 )]
_A : Optional[Any] = VGroup(*_a ).arrange(_a , buff=0 )
_A : Any = VGroup(*_a ).arrange(_a , buff=0 )
_A : Optional[int] = VGroup(_a , _a ).arrange(_a , buff=0 )
_A : Optional[Any] = Text("""CPU""" , font_size=24 )
_A : List[str] = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
_A : Optional[Any] = [mem.copy() for i in range(4 )]
_A : Optional[int] = VGroup(*_a ).arrange(_a , buff=0 )
_A : Optional[Any] = Text("""GPU""" , font_size=24 )
_A : Union[str, Any] = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
gpu.move_to([-1, -1, 0] )
self.add(_a )
_A : str = [mem.copy() for i in range(6 )]
_A : int = VGroup(*_a ).arrange(_a , buff=0 )
_A : str = Text("""Model""" , font_size=24 )
_A : List[Any] = Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.add(_a )
_A : List[str] = []
for i, rect in enumerate(_a ):
rect.set_stroke(_a )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_A : str = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=_a , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=_a , buff=0.0 )
self.add(_a )
cpu_targs.append(_a )
_A : Union[str, Any] = [mem.copy() for i in range(6 )]
_A : str = VGroup(*_a ).arrange(_a , buff=0 )
_A : List[str] = Text("""Loaded Checkpoint""" , font_size=24 )
_A : Optional[int] = Group(_a , _a ).arrange(_a , aligned_edge=_a , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_A : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_A : int = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_a , _a )
_A : List[str] = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_A : Optional[int] = MarkupText(
F'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_a ) , Write(_a ) )
self.play(Write(_a , run_time=1 ) , Create(_a , run_time=1 ) )
_A : Optional[int] = []
_A : Dict = []
for i, rect in enumerate(_a ):
_A : int = fill.copy().set_fill(_a , opacity=0.7 )
target.move_to(_a )
first_animations.append(GrowFromCenter(_a , run_time=1 ) )
_A : Optional[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(_a , run_time=1.5 ) )
self.play(*_a )
self.play(*_a )
self.wait()
| 26 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : List[Any] = logging.get_logger(__name__)
a : Union[str, Any] = torch.device("""cpu""")
def prepare_img( ):
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def get_expected_output(swiftformer_name ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02] )
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys(state_dict ):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(""".pwconv""" , """.point_wise_conv""" )
        if ".dwconv" in k:
            k_new = k_new.replace(""".dwconv""" , """.depth_wise_conv""" )
        if ".Proj." in k:
            k_new = k_new.replace(""".Proj.""" , """.proj.""" )
        if "patch_embed" in k_new:
            k_new = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" )
        if "network" in k_new:
            ls = k_new.split(""".""" )
            if ls[2].isdigit():
                k_new = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] )
            else:
                k_new = k_new.replace("""network""" , """swiftformer.encoder.network""" )
        rename_keys.append((k, k_new) )
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name , pytorch_dump_folder_path , original_ckpt ):
    config = SwiftFormerConfig()
    # build the ImageNet-1k id2label mapping for the classification head
    config.num_labels = 1000
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("""https""" ):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt , map_location="""cpu""" , check_hash=True )
        else:
            checkpoint = torch.load(original_ckpt , map_location="""cpu""" )
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config ).eval()
    hf_model.load_state_dict(state_dict )
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("""preprocessor_config""" )
    inputs = processor(images=image , return_tensors="""pt""" )
    # compare outputs from both models
    expected_logits = get_expected_output(swiftformer_name )
    hf_logits = hf_model(inputs["""pixel_values"""] ).logits
    assert hf_logits.shape == torch.Size([1, 1000] )
    assert torch.allclose(hf_logits[0, 0:5] , expected_logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
a : Union[str, Any] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 265 | 0 |
'''simple docstring'''
def solution(n : int = 2_000_000 ) -> int:
    """Return the sum of all primes below n, via a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1 )]  # 0 means "prime so far"
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f'''{solution() = }''')
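A quick sanity check for the sieve (the primes below 10 are 2, 3, 5 and 7; the default limit of 2,000,000 is the classic Project Euler 10 input, whose published answer is 142913828922):
assert solution(10) == 2 + 3 + 5 + 7  # 17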
| 27 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("""CUDA out of memory.""" )
class ModelForTest(nn.Module ):
    def __init__( self ):
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward( self , x ):
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class UpperCamelCase_ ( unittest.TestCase ):
    def _lowercase( self ) -> Optional[Any]:
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
        mock_training_loop_function()
        self.assertListEqual(batch_sizes , [128, 64, 32, 16, 8] )
    def _lowercase( self ) -> Any:
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size , arga ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga
        bs , arga = mock_training_loop_function("""hello""" )
        self.assertListEqual(batch_sizes , [128, 64, 32, 16, 8] )
        self.assertListEqual([bs, arga] , [8, """hello"""] )
    def _lowercase( self ) -> Any:
        @find_executable_batch_size(starting_batch_size=0 )
        def mock_training_loop_function(batch_size ):
            pass
        with self.assertRaises(RuntimeError ) as cm:
            mock_training_loop_function()
        self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
    def _lowercase( self ) -> Optional[int]:
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass
        with self.assertRaises(RuntimeError ) as cm:
            mock_training_loop_function()
        self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
    def _lowercase( self ) -> Optional[Any]:
        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size , arg1 , arg2 ):
            if batch_size != 8:
                raise_fake_out_of_memory()
        with self.assertRaises(TypeError ) as cm:
            mock_training_loop_function(128 , """hello""" , """world""" )
        self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0] )
        self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0] )
    def _lowercase( self ) -> int:
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
            raise ValueError("""Oops, we had an error!""" )
        with self.assertRaises(ValueError ) as cm:
            mock_training_loop_function()
        self.assertIn("""Oops, we had an error!""" , cm.exception.args[0] )
    @require_cuda
    def _lowercase( self ) -> Optional[int]:
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
        model = release_memory(model )
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory )
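For context, a minimal sketch of the halving loop these tests exercise; the real accelerate decorator also inspects the exception type more carefully (CUDA/cuDNN out-of-memory variants), so this is only the idea:
import functools

def find_executable_batch_size_sketch(starting_batch_size=128):
    def decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while True:
                if batch_size == 0:
                    raise RuntimeError("No executable batch size found, reached zero.")
                try:
                    # batch_size is injected as the first positional argument
                    return function(batch_size, *args, **kwargs)
                except RuntimeError as e:
                    if "CUDA out of memory" in str(e):
                        batch_size //= 2  # halve and retry on OOM
                    else:
                        raise
        return wrapper
    return decorator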
| 265 | 0 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number ) -> bool:
    """Return True if number is a perfect square."""
    sq = int(number**0.5 )
    return number == sq * sq
def add_three(x_num , x_den , y_num , y_den , z_num , z_den ) -> tuple[int, int]:
    """Add the three fractions and reduce the result."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order = 35 ) -> int:
    """simple docstring"""
    unique_s = set()
    total = Fraction(0 )
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction )
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction )
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction )
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction )
    for num, den in unique_s:
        total += Fraction(num , den )
    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
| 28 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_git"""] = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
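# Runtime behavior of the lazy pattern above, for reference: importing this
# package only registers the `_import_structure` map; the heavy `modeling_git`
# submodule is loaded the first time one of its attributes is actually touched:
#
#     from transformers.models.git import GitModel  # triggers the real import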
| 265 | 0 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    '''simple docstring'''

    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
    def __init__(self, hparams, **kwargs) -> None:
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training')
            if hparams.sortish_sampler:
                raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously')
        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, 'summarization')
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / 'metrics.json'
        self.hparams_save_path = Path(self.output_dir) / 'hparams.pkl'
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            'train': self.hparams.n_train,
            'val': self.hparams.n_val,
            'test': self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        self.target_lens = {
            'train': self.hparams.max_target_length,
            'val': self.hparams.val_max_target_length,
            'test': self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())
        self.hparams.git_sha = get_git_info()['repo_sha']
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer, 'prepare_seq2seq_batch') else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: dict) -> Dict[str, List[str]]:
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if 'mask' not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / 'text_batch.json')
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / 'tok_batch.json')
        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs) -> List[str]:
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids) -> List[str]:
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)
    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch['input_ids'], batch['attention_mask']
        tgt_ids = batch['labels']
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch['decoder_input_ids'] = decoder_input_ids
            self.save_readable_batch(batch)
        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs['logits']
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
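
    # For reference, `label_smoothed_nll_loss` used above implements the usual
    # fairseq-style smoothing: a weight `epsilon` of the probability mass is
    # spread evenly over the vocabulary instead of a one-hot target. A minimal
    # sketch of the core computation (ignoring padding, which the real helper
    # also masks out):
    #
    #     nll_loss = -lprobs.gather(dim=-1, index=target.unsqueeze(-1))
    #     smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    #     loss = (1.0 - epsilon) * nll_loss + (epsilon / lprobs.size(-1)) * smooth_loss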
    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs['tpb'] = batch['input_ids'].ne(self.pad).sum() + batch['labels'].ne(self.pad).sum()
        logs['bs'] = batch['input_ids'].shape[0]
        logs['src_pad_tok'] = batch['input_ids'].eq(self.pad).sum()
        logs['src_pad_frac'] = batch['input_ids'].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses['loss']
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ['gen_time', 'gen_len']
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics['step_count'] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x['preds'] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch['input_ids'], attention_mask=batch['attention_mask'], use_cache=True, decoder_start_token_id=self.decoder_start_token_id, num_beams=self.eval_beams, max_length=self.eval_max_length, )
        gen_time = (time.time() - t0) / batch['input_ids'].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch['labels'])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx) -> int:
        return self._generative_step(batch)

    def test_epoch_end(self, outputs) -> Dict:
        return self.validation_epoch_end(outputs, prefix='test')
    def get_dataset(self, type_path) -> SeqaSeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs, )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=sampler, )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1 )
            return DataLoader(
                dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers, )
        else:
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=None, )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader('train', batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader('val', batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader('test', batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir) -> Dict:
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            '--max_source_length' , default=1_0_2_4 , type=int , help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--max_target_length' , default=5_6 , type=int , help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--val_max_target_length' , default=1_4_2 , type=int , help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--test_max_target_length' , default=1_4_2 , type=int , help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument('--freeze_encoder' , action='store_true' )
        parser.add_argument('--freeze_embeds' , action='store_true' )
        parser.add_argument('--sortish_sampler' , action='store_true' , default=False )
        parser.add_argument('--overwrite_output_dir' , action='store_true' , default=False )
        parser.add_argument('--max_tokens_per_batch' , type=int , default=None )
        parser.add_argument('--logger_name' , type=str , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
        parser.add_argument('--n_train' , type=int , default=-1 , required=False , help='# examples. -1 means use all.' )
        parser.add_argument('--n_val' , type=int , default=5_0_0 , required=False , help='# examples. -1 means use all.' )
        parser.add_argument('--n_test' , type=int , default=-1 , required=False , help='# examples. -1 means use all.' )
        parser.add_argument(
            '--task' , type=str , default='summarization' , required=False , help='# examples. -1 means use all.' )
        parser.add_argument('--label_smoothing' , type=float , default=0.0 , required=False )
        parser.add_argument('--src_lang' , type=str , default='' , required=False )
        parser.add_argument('--tgt_lang' , type=str , default='' , required=False )
        parser.add_argument('--eval_beams' , type=int , default=None , required=False )
        parser.add_argument(
            '--val_metric' , type=str , default=None , required=False , choices=['bleu', 'rouge2', 'loss', None] )
        parser.add_argument('--eval_max_gen_length' , type=int , default=None , help='never generate more than n tokens' )
        parser.add_argument('--save_top_k' , type=int , default=1 , required=False , help='How many checkpoints to save' )
        parser.add_argument(
            '--early_stopping_patience' , type=int , default=-1 , required=False , help=(
                '-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
                ' val_check_interval will effect it.'
            ) , )
        return parser
class TranslationModule(SummarizationModule):
    '''simple docstring'''

    mode = 'translation'
    loss_names = ['loss']
    metric_names = ['bleu']
    default_val_metric = 'bleu'

    def __init__(self, hparams, **kwargs) -> List[str]:
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs['src_lang'] = hparams.src_lang
        self.dataset_kwargs['tgt_lang'] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    '''simple docstring'''
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)
    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith('/tmp')
        or str(args.output_dir).startswith('/var')
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get('WANDB_PROJECT', dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=F"hf_{dataset}")
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False
    lower_is_better = args.val_metric == 'loss'
    trainer: pl.Trainer = generic_train(
        model, args, logging_callback=SeqaSeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better), early_stopping_callback=es_callback, logger=logger, )
    pickle_save(model.hparams, model.output_dir / 'hparams.pkl')
    if not args.do_predict:
        return model
    model.hparams.test_checkpoint = ''
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, '*.ckpt'), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
main(args)
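# Illustrative invocation (paths and the model flag are placeholders; the
# generic flags such as --model_name_or_path and --do_train are assumed to be
# defined by add_generic_args/lightning_base, which are not shown here):
#
#     python finetune.py --data_dir ./cnn_dm --output_dir ./out \
#         --model_name_or_path t5-small --do_train --do_predict \
#         --max_source_length 1024 --max_target_length 56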
| 29 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self) -> List[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_tokenizer(self , **kwargs ) -> Optional[Any]:
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer(self , **kwargs ) -> int:
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ) -> Optional[int]:
        input_text = """lower newer"""
        output_text = """lower newer"""
        return input_text, output_text
    def test_full_tokenizer(self ) -> Optional[Any]:
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = """lower newer"""
        bpe_tokens = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : str = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=A ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=A ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
    def test_sequence_builders(self ) -> Optional[int]:
        tokenizer = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )

        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )

        encoded_text_from_decode = tokenizer.encode(
            """sequence builders""" , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            """sequence builders""" , """multi-sequence build""" , add_special_tokens=True , add_prefix_space=False )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self ) -> List[Any]:
        tokenizer = self.get_tokenizer()

        sequence = """Encode this sequence."""
        space_encoding = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=False )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(first_char , space_encoding )

        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(first_char , space_encoding )

        tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
        encoded = tokenizer.encode(sequence , add_special_tokens=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(first_char , space_encoding )

        # Testing spaces after special tokens
        mask = """<mask>"""
        tokenizer.add_special_tokens(
            {"""mask_token""": AddedToken(mask , lstrip=True , rstrip=False )} )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask )

        sequence = """Encode <mask> sequence"""
        sequence_nospace = """Encode <mask>sequence"""

        encoded = tokenizer.encode(sequence )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(first_char , space_encoding )

        encoded = tokenizer.encode(sequence_nospace )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(first_char , space_encoding )
    def test_pretokenized_inputs(self ) -> Optional[int]:
        pass
    def test_embeded_special_tokens(self ) -> Any:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = """A, <mask> AllenNLP sentence."""
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )

                self.assertSequenceEqual(
                    tokens_p_str , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
                self.assertSequenceEqual(
                    tokens_r_str , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
    def test_change_add_prefix_space_and_trim_offsets_args(self ) -> List[Any]:
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=True , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )

            self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , add_prefix_space )
            self.assertEqual(post_processor_state["""add_prefix_space"""] , add_prefix_space )
            self.assertEqual(post_processor_state["""trim_offsets"""] , trim_offsets )
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self ) -> Optional[Any]:
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                text_of_1_token = """hello"""  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f'''{text_of_1_token} {text_of_1_token}'''

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )

                text = f''' {text}'''

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
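
        # Worked example of the toy vocabulary defined in setUp, for reference:
        # with merges ["\u0120 l", "\u0120l o", "\u0120lo w", "e r"], byte-level
        # BPE rewrites " lower" as ["\u0120low", "er"], while the unprefixed
        # leading word only gets the "e r" merge -- which is why
        # test_full_tokenizer expects
        # ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] for "lower newer".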
| 265 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowercase__( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components( self ) -> List[str]:
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs( self , device , seed=0 ) -> int:
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''original_image''': original_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass( self ) -> Union[str, Any]:
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )

    def test_save_load_optional_components( self ) -> Optional[Any]:
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
    def test_save_load_float16( self ) -> Union[str, Any]:
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1 )

    def test_attention_slicing_forward_pass( self ) -> List[str]:
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )

    def test_save_load_local( self ) -> Tuple:
        self._test_save_load_local()

    def test_inference_batch_single_identical( self ) -> List[str]:
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )
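
    # The tiny random tensors built in get_dummy_inputs above follow the usual
    # diffusers test pattern: they stand in for real images so the pipeline
    # plumbing can be exercised quickly. A minimal sketch of driving the
    # pipeline with them (component wiring comes from the tester mixins):
    #
    #     pipe = IFInpaintingSuperResolutionPipeline(**self.get_dummy_components())
    #     images = pipe(**self.get_dummy_inputs("cpu")).images  # numpy output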
| 30 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class UpperCamelCase_ ( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self , model , tokenizer , processor ) -> Dict:
        classifier = ZeroShotClassificationPipeline(
            model=model , tokenizer=tokenizer , candidate_labels=["""polics""", """health"""] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self , classifier , _ ) -> Optional[int]:
        outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" )
        self.assertEqual(outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str )], """scores""": [ANY(float )]} )

        # No kwarg
        outputs = classifier("""Who are you voting for in 2020?""" , ["""politics"""] )
        self.assertEqual(outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str )], """scores""": [ANY(float )]} )

        outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] )
        self.assertEqual(outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str )], """scores""": [ANY(float )]} )

        outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" )
        self.assertEqual(
            outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str ), ANY(str )], """scores""": [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )

        outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] )
        self.assertEqual(
            outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str ), ANY(str )], """scores""": [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )

        outputs = classifier(
            """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" )
        self.assertEqual(outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str )], """scores""": [ANY(float )]} )

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["""I am happy"""] , ["""positive""", """negative"""] )
        self.assertEqual(
            outputs , [
                {"""sequence""": ANY(str ), """labels""": [ANY(str ), ANY(str )], """scores""": [ANY(float ), ANY(float )]}
                for i in range(1 )
            ] , )
        outputs = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] )
        self.assertEqual(
            outputs , [
                {"""sequence""": ANY(str ), """labels""": [ANY(str ), ANY(str )], """scores""": [ANY(float ), ANY(float )]}
                for i in range(2 )
            ] , )

        with self.assertRaises(ValueError ):
            classifier("""""" , candidate_labels="""politics""" )

        with self.assertRaises(TypeError ):
            classifier(None , candidate_labels="""politics""" )

        with self.assertRaises(ValueError ):
            classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" )

        with self.assertRaises(TypeError ):
            classifier("""Who are you voting for in 2020?""" , candidate_labels=None )

        with self.assertRaises(ValueError ):
            classifier(
                """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , )

        with self.assertRaises(AttributeError ):
            classifier(
                """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=None , )

        self.run_entailment_id(classifier )
    def run_entailment_id(self , zero_shot_classifier: Pipeline ) -> Any:
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )

        config.label2id = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )

        config.label2id = {"""ENTAIL""": 0, """NON-ENTAIL""": 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )

        config.label2id = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )

        config.label2id = original_label2id
        self.assertEqual(original_entailment , zero_shot_classifier.entailment_id )
@require_torch
    def test_truncation(self ) -> str:
        zero_shot_classifier = pipeline(
            """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            """Who are you voting for in 2020?""" * 100 , candidate_labels=["""politics""", """public health""", """science"""] )
@require_torch
    def test_small_model_pt(self ) -> Union[str, Any]:
        zero_shot_classifier = pipeline(
            """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )

        self.assertEqual(
            nested_simplify(outputs ) , {
                """sequence""": """Who are you voting for in 2020?""",
                """labels""": ["""science""", """public health""", """politics"""],
                """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
            } , )
@require_tf
    def test_small_model_tf(self ) -> Optional[int]:
        zero_shot_classifier = pipeline(
            """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , )
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )

        self.assertEqual(
            nested_simplify(outputs ) , {
                """sequence""": """Who are you voting for in 2020?""",
                """labels""": ["""science""", """public health""", """politics"""],
                """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
            } , )
@slow
@require_torch
    def test_large_model_pt(self ) -> List[str]:
        zero_shot_classifier = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" )
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
            nested_simplify(outputs ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
            """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=True , )
self.assertEqual(
            nested_simplify(outputs ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
    def test_large_model_tf(self ) -> List[str]:
        zero_shot_classifier = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" )
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
            nested_simplify(outputs ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
            """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=True , )
self.assertEqual(
            nested_simplify(outputs ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
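
    # Note on the multi_label flag exercised above: with multi_label=False the
    # candidate labels compete through a softmax, so scores sum to 1.0; with
    # multi_label=True each label is scored independently from its
    # entailment-vs-contradiction logits, so several labels can score high at
    # once (e.g. both "translation" and "machine learning" above 0.7).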
| 265 | 0 |
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    '''simple docstring'''

    def __init__( self : List[str] , cache_dir: Optional[str] = None ):
        self.extract_dir = (
            os.path.join(cache_dir , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path( self : Tuple , path: str ):
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path )
        return os.path.join(self.extract_dir , hash_url_to_filename(abs_path ) )

    def _do_extract( self : int , output_path: str , force_extract: bool ):
        return force_extract or (
            not os.path.isfile(output_path ) and not (os.path.isdir(output_path ) and os.listdir(output_path ))
        )

    def extract( self : Optional[int] , input_path: str , force_extract: bool = False ):
        extractor_format = self.extractor.infer_extractor_format(input_path )
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path )
        if self._do_extract(output_path , force_extract ):
            self.extractor.extract(input_path , output_path , extractor_format )
        return output_path
class BaseExtractor(ABC ):
    '''simple docstring'''

    @classmethod
    @abstractmethod
    def is_extractable( cls : str , path: Union[Path, str] , **kwargs: Dict ):
        ...

    @staticmethod
    @abstractmethod
    def extract( input_path: Union[Path, str] , output_path: Union[Path, str] ):
        ...
class MagicNumberBaseExtractor(BaseExtractor , ABC ):
    '''simple docstring'''

    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number( path: Union[Path, str] , magic_number_length: int ):
        with open(path , "rb" ) as f:
            return f.read(magic_number_length )

    @classmethod
    def is_extractable( cls : Any , path: Union[Path, str] , magic_number: bytes = b"" ):
        if not magic_number:
            magic_number_length = max(len(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
            try:
                magic_number = cls.read_magic_number(path , magic_number_length )
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
class TarExtractor(BaseExtractor ):
    '''simple docstring'''

    @classmethod
    def is_extractable( cls : str , path: Union[Path, str] , **kwargs: List[Any] ):
        return tarfile.is_tarfile(path )

    @staticmethod
    def safemembers( members , output_path: str ):
        def resolved(path : str ) -> str:
            return os.path.realpath(os.path.abspath(path ) )

        def badpath(path : str , base : str ) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base , path ) ).startswith(base )

        def badlink(info , base : str ) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base , os.path.dirname(info.name ) ) )
            return badpath(info.linkname , base=tip )

        base = resolved(output_path )

        for finfo in members:
            if badpath(finfo.name , base ):
                logger.error(F"""Extraction of {finfo.name} is blocked (illegal path)""" )
            elif finfo.issym() and badlink(finfo , base ):
                logger.error(F"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
            elif finfo.islnk() and badlink(finfo , base ):
                logger.error(F"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
            else:
                yield finfo

    @staticmethod
    def extract( input_path: Union[Path, str] , output_path: Union[Path, str] ):
        os.makedirs(output_path , exist_ok=True )
        tar_file = tarfile.open(input_path )
        tar_file.extractall(output_path , members=TarExtractor.safemembers(tar_file , output_path ) )
        tar_file.close()
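
# safemembers above is a path-traversal guard: any tar entry whose resolved
# path escapes the destination directory, and any symlink/hardlink pointing
# outside it, is logged and skipped instead of extracted, so a hostile entry
# like "../../etc/passwd" never reaches extractall.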
class GzipExtractor(MagicNumberBaseExtractor ):
    '''simple docstring'''

    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract( input_path: Union[Path, str] , output_path: Union[Path, str] ):
        with gzip.open(input_path , "rb" ) as gzip_file:
            with open(output_path , "wb" ) as extracted_file:
                shutil.copyfileobj(gzip_file , extracted_file )
class ZipExtractor(MagicNumberBaseExtractor ):
    '''simple docstring'''

    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable( cls : Dict , path: Union[Path, str] , magic_number: bytes = b"" ):
        if super().is_extractable(path , magic_number=magic_number ):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path , "rb" ) as fp:
                endrec = _EndRecData(fp )
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET] )  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir )  # CD is where we expect it to be
                            if len(data ) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir , data )  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract( input_path: Union[Path, str] , output_path: Union[Path, str] ):
        os.makedirs(output_path , exist_ok=True )
        with zipfile.ZipFile(input_path , "r" ) as zip_file:
            zip_file.extractall(output_path )
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor ):
    '''simple docstring'''

    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract( input_path: Union[Path, str] , output_path: Union[Path, str] ):
        with lzma.open(input_path ) as compressed_file:
            with open(output_path , "wb" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class RarExtractor(MagicNumberBaseExtractor ):
    '''simple docstring'''

    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract( input_path: Union[Path, str] , output_path: Union[Path, str] ):
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile" )
        import rarfile

        os.makedirs(output_path , exist_ok=True )
        rf = rarfile.RarFile(input_path )
        rf.extractall(output_path )
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor ):
    '''simple docstring'''

    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract( input_path: Union[Path, str] , output_path: Union[Path, str] ):
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard" )
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path , "rb" ) as ifh, open(output_path , "wb" ) as ofh:
            dctx.copy_stream(ifh , ofh )
class BzipaExtractor(MagicNumberBaseExtractor ):
    '''simple docstring'''

    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract( input_path: Union[Path, str] , output_path: Union[Path, str] ):
        with bz2.open(input_path , "rb" ) as compressed_file:
            with open(output_path , "wb" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class SevenZipExtractor(MagicNumberBaseExtractor ):
    '''simple docstring'''

    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract( input_path: Union[Path, str] , output_path: Union[Path, str] ):
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr" )
        import py7zr

        os.makedirs(output_path , exist_ok=True )
        with py7zr.SevenZipFile(input_path , "r" ) as archive:
            archive.extractall(output_path )
class LzaExtractor(MagicNumberBaseExtractor ):
    '''simple docstring'''

    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract( input_path: Union[Path, str] , output_path: Union[Path, str] ):
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4" )
        import lz4.frame

        with lz4.frame.open(input_path , "rb" ) as compressed_file:
            with open(output_path , "wb" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class lowerCamelCase_ :
'''simple docstring'''
__UpperCamelCase: Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def _A ( cls : List[Any] ):
return max(
len(A )
for extractor in cls.extractors.values()
if issubclass(A , A )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def _A ( A : Union[Path, str] , A : int ):
try:
return MagicNumberBaseExtractor.read_magic_number(A , magic_number_length=A )
except OSError:
return b""
@classmethod
def _A ( cls : Optional[Any] , A : Union[Path, str] , A : bool = False ):
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead." , category=A , )
_UpperCAmelCase : Union[str, Any] = cls.infer_extractor_format(A )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def _A ( cls : Dict , A : Union[Path, str] ): # <Added version="2.4.0"/>
_UpperCAmelCase : Optional[int] = cls._get_magic_number_max_length()
_UpperCAmelCase : str = cls._read_magic_number(A , A )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(A , magic_number=A ):
return extractor_format
@classmethod
def _A ( cls : List[str] , A : Union[Path, str] , A : Union[Path, str] , A : Optional[str] = None , A : Optional[BaseExtractor] = "deprecated" , ):
os.makedirs(os.path.dirname(A ) , exist_ok=A )
# Prevent parallel extractions
_UpperCAmelCase : Tuple = str(Path(A ).with_suffix(".lock" ) )
with FileLock(A ):
shutil.rmtree(A , ignore_errors=A )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(A , A ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead." , category=A , )
_UpperCAmelCase : Tuple = extractor if extractor != "deprecated" else extractor_format
else:
_UpperCAmelCase : Tuple = cls.extractors[extractor_format]
return extractor.extract(A , A )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0." , category=A , )
for extractor in cls.extractors.values():
if extractor.is_extractable(A ):
return extractor.extract(A , A )
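# The registry above dispatches on "magic numbers": fixed byte signatures at the
# start of each archive format. A minimal standalone sketch of that idea, using
# only the three signatures shown in this file; `sniff_format` is a hypothetical
# helper, not part of the library.
MAGIC_NUMBERS = {
    "bz2": b"\x42\x5A\x68",
    "7z": b"\x37\x7A\xBC\xAF\x27\x1C",
    "lz4": b"\x04\x22\x4D\x18",
}

def sniff_format(path):
    """Return the name of the format whose signature prefixes the file, or None."""
    max_len = max(len(magic) for magic in MAGIC_NUMBERS.values())
    with open(path, "rb") as f:
        header = f.read(max_len)
    for name, magic in MAGIC_NUMBERS.items():
        if header.startswith(magic):
            return name
    return None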
| 31 |
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
a : List[Any] = """__DUMMY_TRANSFORMERS_USER__"""
a : Tuple = """Dummy User"""
a : Optional[Any] = """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"""
a : Optional[Any] = """https://hub-ci.huggingface.co"""
a : List[Any] = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
a : Tuple = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}"""
a : str = Path("""~/.huggingface/hub_ci_token""").expanduser()
@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> Optional[int]:
monkeypatch.setattr(
"""huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , _lowercase )
@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> List[str]:
monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , _lowercase )
monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , _lowercase )
@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> Any:
monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , _lowercase )
@pytest.fixture
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[Any]:
HfFolder.save_token(_lowercase )
yield
HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( ) -> str:
return HfApi(endpoint=_lowercase )
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Union[str, Any]:
UpperCAmelCase : str = HfFolder.get_token()
HfFolder.save_token(_lowercase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(_lowercase )
@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> Any:
def _cleanup_repo(_lowercase ):
hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" )
return _cleanup_repo
@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> List[str]:
@contextmanager
def _temporary_repo(_lowercase ):
try:
yield repo_id
finally:
cleanup_repo(_lowercase )
return _temporary_repo
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]:
UpperCAmelCase : str = F'''repo_txt_data-{int(time.time() * 10e3 )}'''
UpperCAmelCase : List[Any] = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase )
hf_api.upload_file(
token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data/text_data.txt""" , repo_id=_lowercase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]:
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]:
UpperCAmelCase : Optional[int] = F'''repo_zipped_txt_data-{int(time.time() * 10e3 )}'''
UpperCAmelCase : Optional[int] = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase )
hf_api.upload_file(
token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data.zip""" , repo_id=_lowercase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]:
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Tuple:
UpperCAmelCase : List[Any] = F'''repo_zipped_img_data-{int(time.time() * 10e3 )}'''
UpperCAmelCase : List[str] = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase )
hf_api.upload_file(
token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data.zip""" , repo_id=_lowercase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]:
return hf_private_dataset_repo_zipped_img_data_
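# Fixtures like the ones above are consumed by tests that receive the freshly
# created repo id and the CI token. A hedged sketch of such a test; the fixture
# names `hf_private_dataset_repo_txt_data` and `ci_hub_token` are assumptions
# standing in for the mangled names in this file.
def test_private_text_repo_is_reachable(hf_api, hf_private_dataset_repo_txt_data, ci_hub_token):
    # List the files in the fixture-created repo and check the uploaded file is there.
    info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=ci_hub_token)
    assert any(f.rfilename == "data/text_data.txt" for f in info.siblings)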
| 265 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def SCREAMING_SNAKE_CASE_ ( ) -> Any:
"""simple docstring"""
a_ : int = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
a_ : List[Any] = Image.open(requests.get(__A , stream=__A ).raw ).convert('RGB' )
return image
def SCREAMING_SNAKE_CASE_ ( __A : List[str] ) -> Dict:
"""simple docstring"""
a_ : List[Any] = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def SCREAMING_SNAKE_CASE_ ( __A : List[str] , __A : Tuple , __A : Dict ) -> Union[str, Any]:
"""simple docstring"""
a_ : List[str] = dct.pop(__A )
a_ : Optional[Any] = val
def SCREAMING_SNAKE_CASE_ ( __A : str , __A : Optional[Any] ) -> str:
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
a_ : Dict = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" )
a_ : Union[str, Any] = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
a_ : List[str] = torch.cat((q_bias, torch.zeros_like(__A , requires_grad=__A ), v_bias) )
a_ : Optional[int] = qkv_bias
def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : List[str] ) -> Any:
"""simple docstring"""
a_ : int = 3_64 if 'coco' in model_name else 2_24
a_ : Union[str, Any] = BlipaVisionConfig(image_size=__A ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
a_ : Tuple = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=__A ).to_dict()
elif "opt-6.7b" in model_name:
a_ : str = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=__A ).to_dict()
elif "t5-xl" in model_name:
a_ : Optional[int] = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
a_ : Any = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
a_ : List[str] = BlipaConfig(vision_config=__A , text_config=__A )
return config, image_size
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( __A : Dict , __A : List[Any]=None , __A : Any=False ) -> List[Any]:
"""simple docstring"""
a_ : Optional[int] = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
a_ : Optional[int] = tokenizer('\n' , add_special_tokens=__A ).input_ids[0]
a_ , a_ : int = get_blipa_config(__A , eos_token_id=__A )
a_ : Optional[int] = BlipaForConditionalGeneration(__A ).eval()
a_ : Dict = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
a_ , a_ : Dict = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
a_ : List[str] = 'cuda' if torch.cuda.is_available() else 'cpu'
a_ , a_ , a_ : Union[str, Any] = load_model_and_preprocess(
name=__A , model_type=__A , is_eval=__A , device=__A )
original_model.eval()
print('Done!' )
# update state dict keys
a_ : Union[str, Any] = original_model.state_dict()
a_ : Tuple = create_rename_keys(__A )
for src, dest in rename_keys:
rename_key(__A , __A , __A )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
a_ : int = state_dict.pop(__A )
if key.startswith('Qformer.bert' ):
a_ : List[str] = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
a_ : List[str] = key.replace('self' , 'attention' )
if "opt_proj" in key:
a_ : Union[str, Any] = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
a_ : Optional[Any] = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
a_ : List[Any] = key.replace('opt' , 'language' )
if key.startswith('t5' ):
a_ : Dict = key.replace('t5' , 'language' )
a_ : Dict = val
# read in qv biases
read_in_q_v_bias(__A , __A )
a_ , a_ : List[str] = hf_model.load_state_dict(__A , strict=__A )
assert len(__A ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
a_ : Tuple = load_demo_image()
a_ : int = vis_processors['eval'](__A ).unsqueeze(0 ).to(__A )
a_ : str = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(__A )
# create processor
a_ : Any = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=__A , image_std=__A )
a_ : Optional[int] = BlipaProcessor(image_processor=__A , tokenizer=__A )
a_ : Tuple = processor(images=__A , return_tensors='pt' ).pixel_values.to(__A )
# make sure processor creates exact same pixel values
assert torch.allclose(__A , __A )
original_model.to(__A )
hf_model.to(__A )
with torch.no_grad():
if "opt" in model_name:
a_ : List[Any] = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
a_ : Dict = hf_model(__A , __A ).logits
else:
a_ : Union[str, Any] = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
a_ : List[Any] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
a_ : List[Any] = hf_model(__A , __A , labels=__A ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
a_ : List[str] = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=__A )
assert torch.allclose(logits[0, :3, :3] , __A , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
a_ : int = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=__A )
else:
# cast to same type
a_ : List[Any] = logits.dtype
assert torch.allclose(original_logits.to(__A ) , __A , atol=1e-2 )
print('Looks ok!' )
print('Generating a caption...' )
a_ : Any = ''
a_ : Optional[Any] = tokenizer(__A , return_tensors='pt' ).input_ids.to(__A )
a_ : List[Any] = original_model.generate({'image': original_pixel_values} )
a_ : Optional[int] = hf_model.generate(
__A , __A , do_sample=__A , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , __A )
a_ : List[str] = input_ids.shape[1]
a_ : List[str] = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__A )
a_ : int = [text.strip() for text in output_text]
print('HF generation:' , __A )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__A )
hf_model.save_pretrained(__A )
if push_to_hub:
processor.push_to_hub(F"""nielsr/{model_name}""" )
hf_model.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
UpperCAmelCase_ : List[Any] = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
UpperCAmelCase_ : Optional[Any] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
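# Once converted, a checkpoint can be exercised through the standard generation
# API. A hedged sketch, using the upstream `transformers` class names (mangled
# as `Blipa*` above) and a hypothetical repo id of the kind --push_to_hub produces.
import requests
import torch
from PIL import Image
from transformers import Blip2ForConditionalGeneration, Blip2Processor

repo_id = "nielsr/blip2-opt-2.7b"  # hypothetical: whatever --push_to_hub created
device = "cuda" if torch.cuda.is_available() else "cpu"
processor = Blip2Processor.from_pretrained(repo_id)
model = Blip2ForConditionalGeneration.from_pretrained(repo_id).to(device)

url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

inputs = processor(images=image, return_tensors="pt").to(device)
generated_ids = model.generate(**inputs, max_new_tokens=30)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip())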
| 32 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(__magic_name__ )
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , **A ) -> List[str]:
super().__init__(**A )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , A , **A ) -> Optional[Any]:
return super().__call__(A , **A )
def _lowercase( self , **A ) -> Optional[Any]:
UpperCAmelCase : List[Any] = {}
if "candidate_labels" in kwargs:
UpperCAmelCase : Dict = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
UpperCAmelCase : Optional[Any] = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def _lowercase( self , A , A=None , A="This is a photo of {}." ) -> Optional[Any]:
UpperCAmelCase : int = load_image(A )
UpperCAmelCase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
UpperCAmelCase : List[str] = candidate_labels
UpperCAmelCase : Tuple = [hypothesis_template.format(A ) for x in candidate_labels]
UpperCAmelCase : Union[str, Any] = self.tokenizer(A , return_tensors=self.framework , padding=A )
UpperCAmelCase : Union[str, Any] = [text_inputs]
return inputs
def _lowercase( self , A ) -> Optional[int]:
UpperCAmelCase : List[Any] = model_inputs.pop("""candidate_labels""" )
UpperCAmelCase : Optional[Any] = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , A ):
UpperCAmelCase : Optional[Any] = text_inputs[0]
else:
# Batching case.
UpperCAmelCase : Any = text_inputs[0][0]
UpperCAmelCase : Dict = self.model(**A , **A )
UpperCAmelCase : List[Any] = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
def _lowercase( self , A ) -> Union[str, Any]:
UpperCAmelCase : int = model_outputs.pop("""candidate_labels""" )
UpperCAmelCase : int = model_outputs["""logits"""][0]
if self.framework == "pt":
UpperCAmelCase : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 )
UpperCAmelCase : Any = probs.tolist()
if not isinstance(A , A ):
UpperCAmelCase : Any = [scores]
elif self.framework == "tf":
UpperCAmelCase : List[str] = stable_softmax(A , axis=-1 )
UpperCAmelCase : Union[str, Any] = probs.numpy().tolist()
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
UpperCAmelCase : Any = [
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(A , A ) , key=lambda x : -x[0] )
]
return result
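# In practice this pipeline is driven through `transformers.pipeline`. A minimal
# usage sketch with a public CLIP checkpoint; the candidate_labels and
# hypothesis_template kwargs map onto the preprocess parameters handled above.
from transformers import pipeline

classifier = pipeline(
    task="zero-shot-image-classification",
    model="openai/clip-vit-base-patch32",
)
result = classifier(
    "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    candidate_labels=["animals", "humans", "landscape"],
    hypothesis_template="This is a photo of {}.",
)
print(result)  # a list of {"score": float, "label": str}, sorted by score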
| 265 | 0 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def lowercase ( __snake_case : List[Any] , __snake_case : Tuple=1 ):
if n_shave_prefix_segments >= 0:
return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def lowercase ( __snake_case : str , __snake_case : str=0 ):
lowercase_ : Dict = []
for old_item in old_list:
lowercase_ : Optional[Any] = old_item.replace('''in_layers.0''' , '''norm1''' )
lowercase_ : List[Any] = new_item.replace('''in_layers.2''' , '''conv1''' )
lowercase_ : Any = new_item.replace('''out_layers.0''' , '''norm2''' )
lowercase_ : int = new_item.replace('''out_layers.3''' , '''conv2''' )
lowercase_ : Any = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
lowercase_ : Tuple = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
lowercase_ : Union[str, Any] = shave_segments(__snake_case , n_shave_prefix_segments=__snake_case )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def lowercase ( __snake_case : Union[str, Any] , __snake_case : Optional[int]=0 ):
lowercase_ : List[Any] = []
for old_item in old_list:
lowercase_ : Optional[int] = old_item
lowercase_ : Tuple = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
lowercase_ : Dict = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
lowercase_ : Optional[Any] = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
lowercase_ : Dict = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
lowercase_ : Tuple = shave_segments(__snake_case , n_shave_prefix_segments=__snake_case )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def lowercase ( __snake_case : str , __snake_case : int , __snake_case : Optional[Any] , __snake_case : Optional[int]=None , __snake_case : Union[str, Any]=None , __snake_case : Dict=None ):
assert isinstance(__snake_case , __snake_case ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
lowercase_ : List[str] = old_checkpoint[path]
lowercase_ : Union[str, Any] = old_tensor.shape[0] // 3
lowercase_ : List[str] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
lowercase_ : Optional[Any] = old_tensor.shape[0] // config['''num_head_channels'''] // 3
lowercase_ : Optional[Any] = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
lowercase_ , lowercase_ , lowercase_ : Dict = old_tensor.split(channels // num_heads , dim=1 )
lowercase_ : int = query.reshape(__snake_case )
lowercase_ : Optional[int] = key.reshape(__snake_case )
lowercase_ : Optional[int] = value.reshape(__snake_case )
for path in paths:
lowercase_ : Optional[int] = path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
lowercase_ : Optional[Any] = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
lowercase_ : Optional[Any] = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
lowercase_ : str = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
if additional_replacements is not None:
for replacement in additional_replacements:
lowercase_ : List[str] = new_path.replace(replacement['''old'''] , replacement['''new'''] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
lowercase_ : List[Any] = old_checkpoint[path['''old''']][:, :, 0]
else:
lowercase_ : str = old_checkpoint[path['''old''']]
def lowercase ( __snake_case : int , __snake_case : List[Any] ):
lowercase_ : Any = {}
lowercase_ : str = checkpoint['''time_embed.0.weight''']
lowercase_ : Tuple = checkpoint['''time_embed.0.bias''']
lowercase_ : Tuple = checkpoint['''time_embed.2.weight''']
lowercase_ : Dict = checkpoint['''time_embed.2.bias''']
lowercase_ : Tuple = checkpoint['''input_blocks.0.0.weight''']
lowercase_ : Union[str, Any] = checkpoint['''input_blocks.0.0.bias''']
lowercase_ : Dict = checkpoint['''out.0.weight''']
lowercase_ : Optional[int] = checkpoint['''out.0.bias''']
lowercase_ : Union[str, Any] = checkpoint['''out.2.weight''']
lowercase_ : Tuple = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
lowercase_ : Union[str, Any] = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
lowercase_ : Tuple = {
layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key]
for layer_id in range(__snake_case )
}
# Retrieves the keys for the middle blocks only
lowercase_ : str = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
lowercase_ : Optional[int] = {
layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key]
for layer_id in range(__snake_case )
}
# Retrieves the keys for the output blocks only
lowercase_ : Union[str, Any] = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
lowercase_ : Optional[int] = {
layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key]
for layer_id in range(__snake_case )
}
for i in range(1 , __snake_case ):
lowercase_ : List[str] = (i - 1) // (config['''num_res_blocks'''] + 1)
lowercase_ : str = (i - 1) % (config['''num_res_blocks'''] + 1)
lowercase_ : List[Any] = [key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key]
lowercase_ : List[str] = [key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key]
if F'''input_blocks.{i}.0.op.weight''' in checkpoint:
lowercase_ : Any = checkpoint[
F'''input_blocks.{i}.0.op.weight'''
]
lowercase_ : Union[str, Any] = checkpoint[
F'''input_blocks.{i}.0.op.bias'''
]
continue
lowercase_ : int = renew_resnet_paths(__snake_case )
lowercase_ : List[str] = {'''old''': F'''input_blocks.{i}.0''', '''new''': F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
lowercase_ : List[Any] = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
__snake_case , __snake_case , __snake_case , additional_replacements=[meta_path, resnet_op] , config=__snake_case )
if len(__snake_case ):
lowercase_ : List[str] = renew_attention_paths(__snake_case )
lowercase_ : Optional[Any] = {
'''old''': F'''input_blocks.{i}.1''',
'''new''': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
lowercase_ : Optional[int] = {
F'''input_blocks.{i}.1.qkv.bias''': {
'''key''': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
'''query''': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
'''value''': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''input_blocks.{i}.1.qkv.weight''': {
'''key''': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
'''query''': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
'''value''': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__snake_case , __snake_case , __snake_case , additional_replacements=[meta_path] , attention_paths_to_split=__snake_case , config=__snake_case , )
lowercase_ : List[Any] = middle_blocks[0]
lowercase_ : List[str] = middle_blocks[1]
lowercase_ : Union[str, Any] = middle_blocks[2]
lowercase_ : Dict = renew_resnet_paths(__snake_case )
assign_to_checkpoint(__snake_case , __snake_case , __snake_case , config=__snake_case )
lowercase_ : Tuple = renew_resnet_paths(__snake_case )
assign_to_checkpoint(__snake_case , __snake_case , __snake_case , config=__snake_case )
lowercase_ : int = renew_attention_paths(__snake_case )
lowercase_ : Union[str, Any] = {
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
__snake_case , __snake_case , __snake_case , attention_paths_to_split=__snake_case , config=__snake_case )
for i in range(__snake_case ):
lowercase_ : List[str] = i // (config['''num_res_blocks'''] + 1)
lowercase_ : Union[str, Any] = i % (config['''num_res_blocks'''] + 1)
lowercase_ : int = [shave_segments(__snake_case , 2 ) for name in output_blocks[i]]
lowercase_ : Dict = {}
for layer in output_block_layers:
lowercase_ , lowercase_ : Tuple = layer.split('''.''' )[0], shave_segments(__snake_case , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__snake_case )
else:
lowercase_ : List[Any] = [layer_name]
if len(__snake_case ) > 1:
lowercase_ : Any = [key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key]
lowercase_ : Optional[Any] = [key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key]
lowercase_ : int = renew_resnet_paths(__snake_case )
lowercase_ : Dict = renew_resnet_paths(__snake_case )
lowercase_ : Optional[Any] = {'''old''': F'''output_blocks.{i}.0''', '''new''': F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
assign_to_checkpoint(__snake_case , __snake_case , __snake_case , additional_replacements=[meta_path] , config=__snake_case )
if ["conv.weight", "conv.bias"] in output_block_list.values():
lowercase_ : int = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
lowercase_ : Any = checkpoint[
F'''output_blocks.{i}.{index}.conv.weight'''
]
lowercase_ : Any = checkpoint[
F'''output_blocks.{i}.{index}.conv.bias'''
]
# Clear attentions as they have been attributed above.
if len(__snake_case ) == 2:
lowercase_ : Dict = []
if len(__snake_case ):
lowercase_ : int = renew_attention_paths(__snake_case )
lowercase_ : Union[str, Any] = {
'''old''': F'''output_blocks.{i}.1''',
'''new''': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
lowercase_ : str = {
F'''output_blocks.{i}.1.qkv.bias''': {
'''key''': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
'''query''': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
'''value''': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''output_blocks.{i}.1.qkv.weight''': {
'''key''': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
'''query''': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
'''value''': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__snake_case , __snake_case , __snake_case , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=__snake_case , )
else:
lowercase_ : Dict = renew_resnet_paths(__snake_case , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
lowercase_ : int = '''.'''.join(['''output_blocks''', str(__snake_case ), path['''old''']] )
lowercase_ : Optional[int] = '''.'''.join(['''up_blocks''', str(__snake_case ), '''resnets''', str(__snake_case ), path['''new''']] )
lowercase_ : Tuple = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__A : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__A : Tuple = parser.parse_args()
__A : Dict = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__A : Any = json.loads(f.read())
__A : str = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__A : Optional[Any] = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__A : Any = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__A : Optional[int] = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__A : Tuple = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
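# The trickiest step above is splitting fused qkv attention tensors into separate
# query/key/value entries. A standalone sketch of that reshape, mirroring the
# converter's logic under the assumption that num_heads divides the channel dim.
import torch

def split_qkv(qkv_weight: torch.Tensor, num_heads: int):
    """Split a fused (3*C, C) qkv projection into q, k, v of shape (C, C) each."""
    channels = qkv_weight.shape[0] // 3
    # Group rows by head so each head's q, k, v rows stay contiguous.
    grouped = qkv_weight.reshape(num_heads, 3 * channels // num_heads, -1)
    q, k, v = grouped.split(channels // num_heads, dim=1)
    return q.reshape(channels, -1), k.reshape(channels, -1), v.reshape(channels, -1)

q, k, v = split_qkv(torch.randn(3 * 64, 64), num_heads=4)
assert q.shape == k.shape == v.shape == (64, 64)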
| 33 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __lowerCamelCase ( _lowercase ) -> Optional[Any]:
return getitem, k
def __lowerCamelCase ( _lowercase , _lowercase ) -> List[str]:
return setitem, k, v
def __lowerCamelCase ( _lowercase ) -> int:
return delitem, k
def __lowerCamelCase ( _lowercase , _lowercase , *_lowercase ) -> Optional[Any]:
try:
return fun(_lowercase , *_lowercase ), None
except Exception as e:
return None, e
a : List[str] = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
a : List[Any] = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
a : int = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
a : List[Any] = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
a : Tuple = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
a : Optional[Any] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def __lowerCamelCase ( _lowercase ) -> Optional[int]:
UpperCAmelCase : List[str] = HashMap(initial_block_size=4 )
UpperCAmelCase : Dict = {}
for _, (fun, *args) in enumerate(_lowercase ):
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = _run_operation(_lowercase , _lowercase , *_lowercase )
UpperCAmelCase , UpperCAmelCase : Any = _run_operation(_lowercase , _lowercase , *_lowercase )
assert my_res == py_res
assert str(_lowercase ) == str(_lowercase )
assert set(_lowercase ) == set(_lowercase )
assert len(_lowercase ) == len(_lowercase )
assert set(my.items() ) == set(py.items() )
def __lowerCamelCase ( ) -> List[Any]:
def is_public(_lowercase ) -> bool:
return not name.startswith("""_""" )
UpperCAmelCase : int = {name for name in dir({} ) if is_public(_lowercase )}
UpperCAmelCase : Any = {name for name in dir(HashMap() ) if is_public(_lowercase )}
assert dict_public_names > hash_public_names
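# The test above is a differential test: one operation log is replayed against
# both the custom HashMap and a built-in dict, and the outcomes must agree. A
# minimal standalone sketch of that pattern, with a trivial dict subclass
# standing in for the structure under test.
from operator import delitem, getitem, setitem

def replay(ops, target):
    """Apply each (fun, *args) to target, recording results and exception types."""
    results = []
    for fun, *args in ops:
        try:
            results.append(("ok", fun(target, *args)))
        except Exception as exc:
            results.append(("err", type(exc)))
    return results

class TinyMap(dict):
    """Stand-in for the map under test; any mapping works here."""

ops = [(setitem, "a", 1), (getitem, "a"), (delitem, "a"), (getitem, "a")]
assert replay(ops, TinyMap()) == replay(ops, {})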
| 265 | 0 |
'''simple docstring'''
import qiskit
def snake_case_ (_a : int , _a : int ):
UpperCAmelCase = qiskit.Aer.get_backend('''aer_simulator''' )
# Create a Quantum Circuit acting on the q register
UpperCAmelCase = qiskit.QuantumCircuit(_a , _a )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
UpperCAmelCase = qiskit.execute(_a , _a , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(_a )
if __name__ == "__main__":
print(f"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 34 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
a : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
a : List[str] = 2_5_0_0_0_4
a : List[str] = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = MBartTokenizer
lowercase = MBartTokenizerFast
lowercase = True
lowercase = True
def _lowercase( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : str = MBartTokenizer(A , keep_accents=A )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase( self ) -> int:
UpperCAmelCase : Optional[Any] = MBartTokenizer(A , keep_accents=A )
UpperCAmelCase : Tuple = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(
A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def _lowercase( self ) -> Union[str, Any]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase : Tuple = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : str = self.tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase : Dict = tokenizer_r.save_pretrained(A )
UpperCAmelCase : int = tokenizer_p.save_pretrained(A )
# Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
UpperCAmelCase : int = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
UpperCAmelCase : Optional[int] = tokenizer_r.from_pretrained(A )
UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase : Any = tokenizer_r.save_pretrained(A , legacy_format=A )
UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(A )
# Checks it saves with the same files
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
UpperCAmelCase : List[str] = tokenizer_r.from_pretrained(A )
UpperCAmelCase : Any = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase : Optional[Any] = tempfile.mkdtemp()
UpperCAmelCase : Optional[Any] = tokenizer_r.save_pretrained(A , legacy_format=A )
UpperCAmelCase : List[str] = tokenizer_p.save_pretrained(A )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(A )
UpperCAmelCase : str = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( unittest.TestCase ):
lowercase = 'facebook/mbart-large-en-ro'
lowercase = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
lowercase = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
lowercase = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
def _lowercase( cls ) -> Tuple:
UpperCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
UpperCAmelCase : int = 1
return cls
def _lowercase( self ) -> Union[str, Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A )
def _lowercase( self ) -> List[str]:
self.assertIn(A , self.tokenizer.all_special_ids )
UpperCAmelCase : str = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
UpperCAmelCase : int = self.tokenizer.decode(A , skip_special_tokens=A )
UpperCAmelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A )
self.assertEqual(A , A )
self.assertNotIn(self.tokenizer.eos_token , A )
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : List[str] = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , A )
UpperCAmelCase : int = 10
UpperCAmelCase : List[Any] = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , A )
self.assertEqual(len(A ) , A )
def _lowercase( self ) -> Tuple:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250026, 250001] )
def _lowercase( self ) -> Dict:
UpperCAmelCase : Any = tempfile.mkdtemp()
UpperCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A )
UpperCAmelCase : Tuple = MBartTokenizer.from_pretrained(A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A )
@require_torch
def _lowercase( self ) -> List[str]:
UpperCAmelCase : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors="""pt""" )
UpperCAmelCase : Union[str, Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
UpperCAmelCase : Optional[int] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(A , A )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
UpperCAmelCase : str = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , A )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Optional[Any] = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors="""pt""" )
UpperCAmelCase : Dict = self.tokenizer(
text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors="""pt""" )
UpperCAmelCase : Dict = targets["""input_ids"""]
UpperCAmelCase : Union[str, Any] = shift_tokens_right(A , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : List[Any] = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(A ) , {
# A, test, EOS, en_XX
"""input_ids""": [[62, 3034, 2, 250004]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250001,
} , )
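# These tests lean on mBART's shift_tokens_right, which, unlike the plain BART
# shift, wraps the *last* non-pad token (the language code) around to position 0
# as the decoder start token. A sketch of that behaviour:
import torch

def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    """Shift ids one position right, moving the last non-pad token to the front."""
    prev_output_tokens = input_ids.clone()
    index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    decoder_start_tokens = prev_output_tokens.gather(1, index_of_eos).squeeze()
    prev_output_tokens[:, 1:] = input_ids[:, :-1].clone()
    prev_output_tokens[:, 0] = decoder_start_tokens
    return prev_output_tokens

# [tok, tok, eos, ro_RO] -> [ro_RO, tok, tok, eos]
ids = torch.tensor([[11, 12, 2, 250020]])
assert shift_tokens_right(ids, pad_token_id=1).tolist() == [[250020, 11, 12, 2]]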
| 265 | 0 |
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Dict ):
snake_case__ : Optional[int] = """"""
snake_case__ : List[Any] = """"""
snake_case__ : List[str] = []
snake_case__ : str = 0
snake_case__ : Union[str, Any] = 256
snake_case__ : Union[str, Any] = 0
snake_case__ : Any = 0
snake_case__ : List[Any] = 0
snake_case__ : Optional[Any] = 0
def lowerCamelCase ( self : Any , snake_case_ : Union[str, Any] ):
snake_case__ : Dict = cva.imread(snake_case_ , 0 )
snake_case__ : str = copy.deepcopy(self.img )
snake_case__ , snake_case__ , snake_case__ : List[str] = plt.hist(self.img.ravel() , 256 , [0, 256] , label="""x""" )
snake_case__ : str = np.sum(snake_case_ )
for i in range(len(snake_case_ ) ):
snake_case__ : str = x[i] / self.k
self.sk += prk
snake_case__ : int = (self.L - 1) * self.sk
if self.rem != 0:
snake_case__ : List[Any] = int(last % last )
snake_case__ : Tuple = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(snake_case_ )
snake_case__ : Tuple = int(np.ma.count(self.img ) / self.img[1].size )
snake_case__ : int = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
snake_case__ : Tuple = self.img[j][i]
if num != self.last_list[num]:
snake_case__ : str = self.last_list[num]
cva.imwrite("""output_data/output.jpg""" , self.img )
def lowerCamelCase ( self : Optional[int] ):
plt.hist(self.img.ravel() , 256 , [0, 256] )
def lowerCamelCase ( self : Optional[int] ):
cva.imshow("""Output-Image""" , self.img )
cva.imshow("""Input-Image""" , self.original_image )
cva.waitKey(5_000 )
cva.destroyAllWindows()
if __name__ == "__main__":
__a = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
__a = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
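# The class above performs histogram equalization pixel by pixel; the same remap
# can be written vectorised with NumPy alone. A minimal sketch assuming an 8-bit
# grayscale image.
import numpy as np

def equalize(img: np.ndarray, levels: int = 256) -> np.ndarray:
    """Histogram-equalize an 8-bit grayscale image via its CDF."""
    hist, _ = np.histogram(img.ravel(), bins=levels, range=(0, levels))
    cdf = hist.cumsum() / img.size                 # cumulative distribution in [0, 1]
    lookup = np.round((levels - 1) * cdf).astype(np.uint8)
    return lookup[img]                             # remap every pixel via the table

img = np.random.randint(0, 256, size=(4, 4), dtype=np.uint8)
print(equalize(img))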
| 35 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
a : List[str] = get_tests_dir("""fixtures""")
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> int:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase : Tuple = mock.Mock()
UpperCAmelCase : List[str] = 500
UpperCAmelCase : Any = {}
UpperCAmelCase : List[str] = HTTPError
UpperCAmelCase : str = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=A ) as mock_head:
UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# This checks that we did call the fake head request
mock_head.assert_called()
def _lowercase( self ) -> Any:
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def _lowercase( self ) -> Union[str, Any]:
with self.assertRaises(A ):
# config is in subfolder, the following should not work without specifying the subfolder
UpperCAmelCase : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
self.assertIsNotNone(A )
@is_staging_test
class UpperCamelCase_ ( unittest.TestCase ):
@classmethod
def _lowercase( cls ) -> Dict:
UpperCAmelCase : Tuple = TOKEN
HfFolder.save_token(A )
@classmethod
def _lowercase( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A , repo_id="""test-image-processor""" , push_to_hub=A , use_auth_token=self._token )
UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : List[str] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=A , use_auth_token=self._token )
UpperCAmelCase : int = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
def _lowercase( self ) -> Optional[int]:
CustomImageProcessor.register_for_auto_class()
UpperCAmelCase : Optional[Any] = CustomImageProcessor.from_pretrained(A )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(
f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=A )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
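# The first test's trick of patching requests.Session.request to simulate an
# outage works for any from_pretrained call. A standalone sketch, assuming the
# checkpoint is already in the local cache so the offline fallback can succeed.
import unittest.mock as mock
from requests.exceptions import HTTPError
from transformers import ViTImageProcessor

response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = HTTPError
response_mock.json.return_value = {}

with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
    processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
mock_head.assert_called()  # the fallback still probed the (mocked) network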
| 265 | 0 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class UpperCAmelCase_ ( a):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = tempfile.mkdtemp()
_lowerCAmelCase : int = 8
# DPR tok
_lowerCAmelCase : List[str] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_lowerCAmelCase : Dict = os.path.join(self.tmpdirname, "dpr_tokenizer")
os.makedirs(__a, exist_ok=__a)
_lowerCAmelCase : str = os.path.join(__a, DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
_lowerCAmelCase : Dict = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
_lowerCAmelCase : Optional[int] = dict(zip(__a, range(len(__a))))
_lowerCAmelCase : Optional[int] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_lowerCAmelCase : Any = {"unk_token": "<unk>"}
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname, "bart_tokenizer")
os.makedirs(__a, exist_ok=__a)
_lowerCAmelCase : str = os.path.join(__a, BART_VOCAB_FILES_NAMES["vocab_file"])
_lowerCAmelCase : Tuple = os.path.join(__a, BART_VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(__a) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(__a))
def snake_case__ ( self):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
def snake_case__ ( self):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))
def snake_case__ ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
@require_tokenizers
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = os.path.join(self.tmpdirname, "rag_tokenizer")
_lowerCAmelCase : str = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
_lowerCAmelCase : Dict = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
rag_config.save_pretrained(__a)
rag_tokenizer.save_pretrained(__a)
_lowerCAmelCase : Optional[int] = RagTokenizer.from_pretrained(__a, config=__a)
self.assertIsInstance(new_rag_tokenizer.question_encoder, __a)
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
self.assertIsInstance(new_rag_tokenizer.generator, __a)
self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = RagTokenizer.from_pretrained("facebook/rag-token-nq")
_lowerCAmelCase : int = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
_lowerCAmelCase : Tuple = tokenizer(__a)
self.assertIsNotNone(__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
_lowerCAmelCase : Optional[Any] = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
_lowerCAmelCase : Dict = tokenizer(__a)
self.assertIsNotNone(__a)
| 36 |
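The fixture above encodes a minimal GPT-2-style byte-level BPE vocabulary, where "\u0120" (Ġ) marks a token that begins with a space. Below is a hedged sketch of how the merges file drives tokenization; it is simplified (real BPE picks merges by rank among adjacent pairs), but for this tiny vocabulary applying the merges in file order gives the same result:

merges = [("\u0120", "l"), ("\u0120l", "o"), ("\u0120lo", "w"), ("e", "r")]
symbols = ["\u0120", "l", "o", "w", "e", "r"]  # " lower", one symbol per byte
for a, b in merges:
    i = 0
    while i < len(symbols) - 1:
        if (symbols[i], symbols[i + 1]) == (a, b):
            symbols[i : i + 2] = [a + b]  # fuse the adjacent pair
        else:
            i += 1
print(symbols)  # ['\u0120low', 'er'] -> mapped to ids via the vocab dict above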
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def __lowerCamelCase ( _lowercase ) -> Tuple:
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def __lowerCamelCase ( _lowercase ) -> List[Any]:
UpperCAmelCase : Any = create_tensor(_lowercase )
UpperCAmelCase : Union[str, Any] = gather(_lowercase )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def __lowerCamelCase ( _lowercase ) -> Optional[int]:
UpperCAmelCase : Any = [state.process_index]
UpperCAmelCase : Union[str, Any] = gather_object(_lowercase )
assert len(_lowercase ) == state.num_processes, F'''{gathered_obj}, {len(_lowercase )} != {state.num_processes}'''
assert gathered_obj == list(range(state.num_processes ) ), F'''{gathered_obj} != {list(range(state.num_processes ) )}'''
def __lowerCamelCase ( _lowercase ) -> List[Any]:
UpperCAmelCase : Optional[int] = create_tensor(_lowercase )
UpperCAmelCase : List[str] = broadcast(_lowercase )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def __lowerCamelCase ( _lowercase ) -> Tuple:
    # Give the main process one extra element so that `pad_across_processes`
    # actually has something to pad on the other ranks
if state.is_main_process:
UpperCAmelCase : Optional[Any] = torch.arange(state.num_processes + 1 ).to(state.device )
else:
UpperCAmelCase : Tuple = torch.arange(state.num_processes ).to(state.device )
UpperCAmelCase : Optional[Any] = pad_across_processes(_lowercase )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def __lowerCamelCase ( _lowercase ) -> Dict:
# For now runs on only two processes
if state.num_processes != 2:
return
UpperCAmelCase : Optional[Any] = create_tensor(_lowercase )
UpperCAmelCase : Optional[Any] = reduce(_lowercase , """sum""" )
UpperCAmelCase : Optional[Any] = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(_lowercase , _lowercase ), F'''{reduced_tensor} != {truth_tensor}'''
def __lowerCamelCase ( _lowercase ) -> Optional[Any]:
# For now runs on only two processes
if state.num_processes != 2:
return
UpperCAmelCase : Tuple = create_tensor(_lowercase )
UpperCAmelCase : Optional[int] = reduce(_lowercase , """mean""" )
UpperCAmelCase : str = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(_lowercase , _lowercase ), F'''{reduced_tensor} != {truth_tensor}'''
def __lowerCamelCase ( _lowercase ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
def __lowerCamelCase ( ) -> int:
UpperCAmelCase : List[Any] = PartialState()
state.print(F'''State: {state}''' )
state.print("""testing gather""" )
test_gather(_lowercase )
state.print("""testing gather_object""" )
test_gather_object(_lowercase )
state.print("""testing broadcast""" )
test_broadcast(_lowercase )
state.print("""testing pad_across_processes""" )
test_pad_across_processes(_lowercase )
state.print("""testing reduce_sum""" )
test_reduce_sum(_lowercase )
state.print("""testing reduce_mean""" )
test_reduce_mean(_lowercase )
if __name__ == "__main__":
main()
| 265 | 0 |
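A single-process sketch (assuming num_processes == 2) of the tensor layout the distributed tests above rely on: rank r holds [r*n + 1, ..., r*n + n], so gather concatenates to 1..n**2, reduce "sum" yields [4, 6] and reduce "mean" yields [2, 3]:

import torch

n = 2  # stand-in for state.num_processes
per_rank = [torch.arange(n) + 1.0 + n * r for r in range(n)]  # [1., 2.] and [3., 4.]
print(torch.cat(per_rank).tolist())           # gather      -> [1.0, 2.0, 3.0, 4.0]
print(torch.stack(per_rank).sum(0).tolist())  # reduce sum  -> [4.0, 6.0]
print(torch.stack(per_rank).mean(0).tolist()) # reduce mean -> [2.0, 3.0]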
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
_lowerCAmelCase = datasets.logging.get_logger(__name__)
_lowerCAmelCase = '''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
_lowerCAmelCase = '''\
BLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''
_lowerCAmelCase = '''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
_lowerCAmelCase = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage="""https://github.com/google-research/bleurt""" ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ,id="""sequence""" ),
"""references""": datasets.Value("""string""" ,id="""sequence""" ),
} ) ,codebase_urls=["""https://github.com/google-research/bleurt"""] ,reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] ,)
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple:
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
"""Using default BLEURT-Base checkpoint for sequence maximum length 128. """
"""You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" )
lowerCAmelCase__ : str = """bleurt-base-128"""
if self.config_name.lower() in CHECKPOINT_URLS:
lowerCAmelCase__ : Union[str, Any] = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
lowerCAmelCase__ : List[Any] = self.config_name.upper()
else:
raise KeyError(
F"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" )
# download the model checkpoint specified by self.config_name and set up the scorer
lowerCAmelCase__ : int = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
lowerCAmelCase__ : Dict = score.BleurtScorer(os.path.join(__UpperCAmelCase ,__UpperCAmelCase ) )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Union[str, Any]:
lowerCAmelCase__ : Union[str, Any] = self.scorer.score(references=__UpperCAmelCase ,candidates=__UpperCAmelCase )
return {"scores": scores}
| 37 |
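A usage sketch taken from the metric's own docstring above (assumes an older `datasets` release that still ships `load_metric`, the `bleurt` pip package, and network access for the checkpoint download):

import datasets

bleurt = datasets.load_metric("bleurt", "bleurt-tiny-128")
results = bleurt.compute(predictions=["hello there"], references=["hello there"])
print([round(v, 2) for v in results["scores"]])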
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
a : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , A , A , A , A , A , A , A , ) -> Optional[Any]:
super().__init__()
self.register_modules(
vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , )
def _lowercase( self , A = "auto" ) -> List[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def _lowercase( self ) -> Dict:
self.enable_attention_slicing(A )
@torch.no_grad()
def __call__( self , A , A = 512 , A = 512 , A = 50 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , A = None , **A , ) -> List[Any]:
if isinstance(A , A ):
UpperCAmelCase : List[str] = 1
elif isinstance(A , A ):
UpperCAmelCase : Dict = len(A )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(A )}.''' )
# get prompt text embeddings
UpperCAmelCase : List[str] = self.tokenizer(
A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
UpperCAmelCase : List[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCAmelCase : int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCAmelCase : Tuple = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
UpperCAmelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = text_embeddings.shape
UpperCAmelCase : List[str] = text_embeddings.repeat(1 , A , 1 )
UpperCAmelCase : List[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCAmelCase : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase : List[str]
if negative_prompt is None:
UpperCAmelCase : Any = [""""""]
elif type(A ) is not type(A ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !='''
f''' {type(A )}.''' )
elif isinstance(A , A ):
UpperCAmelCase : Optional[int] = [negative_prompt]
elif batch_size != len(A ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
UpperCAmelCase : Any = negative_prompt
UpperCAmelCase : Dict = text_input_ids.shape[-1]
UpperCAmelCase : List[Any] = self.tokenizer(
A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , )
UpperCAmelCase : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase : int = uncond_embeddings.shape[1]
UpperCAmelCase : List[Any] = uncond_embeddings.repeat(A , A , 1 )
UpperCAmelCase : List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCAmelCase : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCAmelCase : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
UpperCAmelCase : str = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCAmelCase : Dict = torch.randn(
A , generator=A , device="""cpu""" , dtype=A ).to(self.device )
UpperCAmelCase : int = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to(
self.device )
else:
UpperCAmelCase : int = torch.randn(
A , generator=A , device=self.device , dtype=A )
UpperCAmelCase : int = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
UpperCAmelCase : Optional[Any] = latents_reference.to(self.device )
UpperCAmelCase : Tuple = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
UpperCAmelCase : int = (latents_shape[3] - latents_shape_reference[3]) // 2
UpperCAmelCase : List[str] = (latents_shape[2] - latents_shape_reference[2]) // 2
UpperCAmelCase : Union[str, Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
UpperCAmelCase : Union[str, Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
UpperCAmelCase : Optional[int] = 0 if dx < 0 else dx
UpperCAmelCase : List[str] = 0 if dy < 0 else dy
UpperCAmelCase : Union[str, Any] = max(-dx , 0 )
UpperCAmelCase : List[Any] = max(-dy , 0 )
UpperCAmelCase : str = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCAmelCase : Union[str, Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase : Optional[int] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase : int = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase : Optional[Any] = {}
if accepts_eta:
UpperCAmelCase : List[str] = eta
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase : str = self.scheduler.scale_model_input(A , A )
# predict the noise residual
UpperCAmelCase : Any = self.unet(A , A , encoder_hidden_states=A ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCAmelCase , UpperCAmelCase : Any = noise_pred.chunk(2 )
UpperCAmelCase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase : Dict = self.scheduler.step(A , A , A , **A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A , A )
UpperCAmelCase : Union[str, Any] = 1 / 0.1_8_2_1_5 * latents
UpperCAmelCase : Tuple = self.vae.decode(A ).sample
UpperCAmelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
UpperCAmelCase : int = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to(
self.device )
UpperCAmelCase , UpperCAmelCase : int = self.safety_checker(
images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
UpperCAmelCase : Any = None
if output_type == "pil":
UpperCAmelCase : int = self.numpy_to_pil(A )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
| 265 | 0 |
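The guidance step in the denoising loop above is plain arithmetic; a minimal numeric sketch with made-up noise values shows how `guidance_scale` extrapolates from the unconditional prediction toward the text-conditioned one:

import torch

guidance_scale = 7.5
noise_pred_uncond = torch.tensor([0.1, -0.2])
noise_pred_text = torch.tensor([0.3, 0.1])
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(noise_pred)  # tensor([1.6000, 2.0500])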
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int , __magic_name__ : list ) -> int:
"""simple docstring"""
_enforce_args(__magic_name__ , __magic_name__ )
if n == 0:
return 0
UpperCamelCase :List[Any] = float("""-inf""" )
for i in range(1 , n + 1 ):
UpperCamelCase :Optional[Any] = max(
__magic_name__ , prices[i - 1] + naive_cut_rod_recursive(n - i , __magic_name__ ) )
    return max_revenue
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int , __magic_name__ : list ) -> Union[str, Any]:
"""simple docstring"""
_enforce_args(__magic_name__ , __magic_name__ )
UpperCamelCase :Tuple = [float("""-inf""" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(__magic_name__ , __magic_name__ , __magic_name__ )
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int , __magic_name__ : list , __magic_name__ : list ) -> Union[str, Any]:
"""simple docstring"""
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
UpperCamelCase :Optional[int] = float("""-inf""" )
for i in range(1 , n + 1 ):
UpperCamelCase :List[str] = max(
__magic_name__ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , __magic_name__ , __magic_name__ ) , )
UpperCamelCase :List[str] = max_revenue
return max_rev[n]
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int , __magic_name__ : list ) -> str:
"""simple docstring"""
_enforce_args(__magic_name__ , __magic_name__ )
    # length(max_rev) = n + 1, to account for the revenue obtainable from a rod of
    # length 0.
UpperCamelCase :Tuple = [float("""-inf""" ) for _ in range(n + 1 )]
UpperCamelCase :Any = 0
for i in range(1 , n + 1 ):
UpperCamelCase :Tuple = max_rev[i]
for j in range(1 , i + 1 ):
UpperCamelCase :Union[str, Any] = max(__magic_name__ , prices[j - 1] + max_rev[i - j] )
UpperCamelCase :Dict = max_revenue_i
return max_rev[n]
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int , __magic_name__ : list ) -> Optional[int]:
"""simple docstring"""
if n < 0:
UpperCamelCase :Tuple = f"""n must be greater than or equal to 0. Got n = {n}"""
raise ValueError(__magic_name__ )
if n > len(__magic_name__ ):
UpperCamelCase :Optional[int] = (
"""Each integral piece of rod must have a corresponding price. """
f"""Got n = {n} but length of prices = {len(__magic_name__ )}"""
)
raise ValueError(__magic_name__ )
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase :Dict = [6, 10, 12, 15, 20, 23]
UpperCamelCase :Optional[Any] = len(__magic_name__ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
UpperCamelCase :Dict = 36
UpperCamelCase :Optional[int] = top_down_cut_rod(__magic_name__ , __magic_name__ )
UpperCamelCase :Any = bottom_up_cut_rod(__magic_name__ , __magic_name__ )
UpperCamelCase :List[Any] = naive_cut_rod_recursive(__magic_name__ , __magic_name__ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 38 |
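Since the variable names in the rod-cutting sample above are obfuscated, here is a readable sketch of the same bottom-up dynamic program (O(n^2) time, O(n) space):

def bottom_up_cut_rod(n, prices):
    # max_rev[i] = best revenue obtainable from a rod of length i
    max_rev = [float("-inf")] * (n + 1)
    max_rev[0] = 0
    for i in range(1, n + 1):
        best = max_rev[i]
        for j in range(1, i + 1):  # cut off a piece of length j, recurse on i - j
            best = max(best, prices[j - 1] + max_rev[i - j])
        max_rev[i] = best
    return max_rev[n]

print(bottom_up_cut_rod(6, [6, 10, 12, 15, 20, 23]))  # 36: six pieces of length 1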
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ :
def __init__( self , A , A=12 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=32 , A=2 , A=4 , A=37 , A=0.1 , A=0.1 , A=512 , A=0.0_2 , A=0 , A=None , ) -> Any:
UpperCAmelCase : Optional[Any] = parent
UpperCAmelCase : str = batch_size
UpperCAmelCase : Union[str, Any] = seq_length
UpperCAmelCase : Optional[Any] = is_training
UpperCAmelCase : int = use_input_mask
UpperCAmelCase : List[Any] = use_labels
UpperCAmelCase : Dict = vocab_size
UpperCAmelCase : str = hidden_size
UpperCAmelCase : List[Any] = projection_dim
UpperCAmelCase : Tuple = num_hidden_layers
UpperCAmelCase : Dict = num_attention_heads
UpperCAmelCase : Optional[Any] = intermediate_size
UpperCAmelCase : Any = dropout
UpperCAmelCase : List[Any] = attention_dropout
UpperCAmelCase : Optional[Any] = max_position_embeddings
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Optional[Any] = scope
UpperCAmelCase : Union[str, Any] = bos_token_id
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Union[str, Any] = None
if self.use_input_mask:
UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
UpperCAmelCase : Tuple = input_mask.numpy()
UpperCAmelCase , UpperCAmelCase : int = input_mask.shape
UpperCAmelCase : Optional[int] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(A ):
UpperCAmelCase : Tuple = 1
UpperCAmelCase : Optional[Any] = 0
UpperCAmelCase : int = self.get_config()
return config, input_ids, tf.convert_to_tensor(A )
def _lowercase( self ) -> int:
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _lowercase( self , A , A , A ) -> Union[str, Any]:
UpperCAmelCase : int = TFBlipTextModel(config=A )
UpperCAmelCase : Union[str, Any] = model(A , attention_mask=A , training=A )
UpperCAmelCase : int = model(A , training=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Dict = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = config_and_inputs
UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = (TFBlipTextModel,) if is_tf_available() else ()
lowercase = False
lowercase = False
lowercase = False
def _lowercase( self ) -> int:
UpperCAmelCase : Union[str, Any] = BlipTextModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 )
def _lowercase( self ) -> Tuple:
self.config_tester.run_common_tests()
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> List[str]:
pass
def _lowercase( self ) -> Optional[int]:
pass
@unittest.skip(reason="""Blip does not use inputs_embeds""" )
def _lowercase( self ) -> Union[str, Any]:
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def _lowercase( self ) -> Optional[int]:
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def _lowercase( self ) -> Dict:
pass
@slow
def _lowercase( self ) -> Dict:
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Any = TFBlipTextModel.from_pretrained(A )
self.assertIsNotNone(A )
def _lowercase( self , A=True ) -> str:
super().test_pt_tf_model_equivalence(allow_missing_keys=A )
| 265 | 0 |
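The mask construction in `prepare_config_and_inputs` above (a random start index per row, ones before it, zeros after) can be sketched standalone in numpy:

import numpy as np

rng = np.random.default_rng(0)
batch_size, seq_length = 3, 7
input_mask = np.ones((batch_size, seq_length), dtype=np.int64)
start_indices = rng.integers(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(start_indices):
    input_mask[batch_idx, :start_index] = 1  # attended positions
    input_mask[batch_idx, start_index:] = 0  # padded-out positions
print(input_mask)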
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(UpperCAmelCase ) )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(UpperCAmelCase ) )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(UpperCAmelCase ) )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(UpperCAmelCase ) )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(UpperCAmelCase ) )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
_UpperCAmelCase = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCAmelCase , variant=UpperCAmelCase ) )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
_UpperCAmelCase = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCAmelCase , variant=UpperCAmelCase ) )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
_UpperCAmelCase = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCAmelCase , variant=UpperCAmelCase ) )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_UpperCAmelCase = 'fp16'
self.assertFalse(is_safetensors_compatible(UpperCAmelCase , variant=UpperCAmelCase ) )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
_UpperCAmelCase = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCAmelCase , variant=UpperCAmelCase ) )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
_UpperCAmelCase = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCAmelCase , variant=UpperCAmelCase ) )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
            # Removed: 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
_UpperCAmelCase = 'fp16'
self.assertFalse(is_safetensors_compatible(UpperCAmelCase , variant=UpperCAmelCase ) )
| 39 |
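A simplified re-implementation of the rule those tests pin down (a sketch, not diffusers' actual `is_safetensors_compatible` source): every PyTorch `.bin` weight must have a `.safetensors` sibling with the same stem, where `pytorch_model` maps to `model` and an optional variant suffix such as `.fp16` is ignored. Requires Python 3.9+ for `str.removesuffix`.

import os

def is_safetensors_compatible_sketch(filenames, variant=None):
    suffix = f".{variant}" if variant is not None else ""
    safetensors, bins = set(), set()
    for f in filenames:
        stem, ext = os.path.splitext(f)
        stem = stem.removesuffix(suffix)  # drop e.g. ".fp16"
        if ext == ".safetensors":
            safetensors.add(stem)
        elif ext == ".bin":
            folder, name = os.path.split(stem)
            if name == "pytorch_model":  # transformers naming convention
                name = "model"
            bins.add(os.path.join(folder, name))
    return bins.issubset(safetensors)

files = ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
print(is_safetensors_compatible_sketch(files))  # True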
'''simple docstring'''
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
a : str = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
a : int = """main"""
# Default branch name
a : Any = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"""
# One particular commit (not the top of `main`)
a : str = """aaaaaaa"""
# This commit does not exist, so we should 404.
a : int = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"""
# Sha-1 of config.json on the top of `main`, for checking purposes
a : Any = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"""
@contextlib.contextmanager
def __lowerCamelCase ( ) -> List[str]:
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def __lowerCamelCase ( ) -> Optional[int]:
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> List[Any]:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class UpperCamelCase_ ( unittest.TestCase ):
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def _lowercase( self , A ) -> Tuple:
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def _lowercase( self , A ) -> Dict:
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def _lowercase( self , A ) -> Union[str, Any]:
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
def _lowercase( self ) -> Optional[int]:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class UpperCamelCase_ ( __magic_name__ ):
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_tf
def _lowercase( self ) -> int:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class UpperCamelCase_ ( __magic_name__ ):
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_flax
def _lowercase( self ) -> Any:
# Flax models don't have labels
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
class UpperCamelCase_ ( __magic_name__ ):
pass
self.assertEqual(find_labels(A ) , [] )
| 265 | 0 |
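A hedged sketch of the behaviour the `find_labels` assertions above pin down (mirroring the test expectations, not necessarily the library source): label argument names are read off the model's `forward`/`call` signature, with a special case for span-based question-answering heads, and Flax modules carry no label arguments:

import inspect

def find_labels_sketch(model_class):
    fn = getattr(model_class, "forward", None) or getattr(model_class, "call", None)
    if fn is None:  # e.g. Flax modules in this simplified sketch
        return []
    params = inspect.signature(fn).parameters
    if "QuestionAnswering" in model_class.__name__:
        return [p for p in params if "label" in p or p in ("start_positions", "end_positions")]
    return [p for p in params if "label" in p]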
"""simple docstring"""
from math import sqrt
def lowercase ( A_ = 1_000_000 )-> int:
'''simple docstring'''
a : int = 0
a : int = 0
a : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(A_ , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''')
| 40 |
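A hedged brute-force cross-check of the counting shortcut above: for a cuboid with sides a <= b <= c, the shortest surface path between opposite corners is sqrt(c**2 + (a + b)**2), so for small limits we can count integer-path cuboids directly (1975 at M = 99 and 2060 at M = 100, per the Project Euler 86 statement):

from math import sqrt

def count_integer_path_cuboids(max_side):
    total = 0
    for c in range(1, max_side + 1):
        for b in range(1, c + 1):
            for a in range(1, b + 1):
                if sqrt(c * c + (a + b) ** 2).is_integer():
                    total += 1
    return total

print(count_integer_path_cuboids(99), count_integer_path_cuboids(100))  # 1975 2060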
'''simple docstring'''
from itertools import count
def __lowerCamelCase ( _lowercase = 5_0 ) -> int:
UpperCAmelCase : Any = [1] * min_block_length
for n in count(_lowercase ):
fill_count_functions.append(1 )
for block_length in range(_lowercase , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_0_0_0_0_0_0:
break
return n
if __name__ == "__main__":
print(F'''{solution() = }''')
| 265 | 0 |
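A hedged brute-force check of the block-counting recurrence above for small inputs: decompose each arrangement by its leftmost block (start position plus length, followed by one forced gap cell), counting the all-empty row as one arrangement. For a row of 7 cells and a minimum block length of 3 this gives 17, matching the Project Euler 114 example:

from functools import lru_cache

def fill_count(min_block_length, row_length):
    @lru_cache(maxsize=None)
    def ways(n):
        if n < 0:
            return 1  # the block ended flush with the right edge
        total = 1  # leave all remaining n cells empty
        for start in range(n):
            for length in range(min_block_length, n - start + 1):
                total += ways(n - start - length - 1)  # one gap after the block
        return total
    return ways(row_length)

print(fill_count(3, 7))  # 17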
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> int:
if not isinstance(UpperCamelCase , UpperCamelCase ):
lowerCamelCase__ : Tuple = f'''Input value of [number={number}] must be an integer'''
raise TypeError(UpperCamelCase )
if number < 1:
lowerCamelCase__ : int = f'''Input value of [number={number}] must be > 0'''
raise ValueError(UpperCamelCase )
lowerCamelCase__ : Optional[int] = 1
for i in range(1 , UpperCamelCase ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41 |
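The loop above implements the recurrence C(i) = C(i-1) * (4i - 2) / (i + 1); note that with this indexing the function returns the (number - 1)-th Catalan number. A cross-check against the closed form C(n) = binom(2n, n) / (n + 1), using a readable rename of the obfuscated function:

from math import comb

def catalan(number):
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number

assert all(catalan(k) == comb(2 * (k - 1), k - 1) // k for k in range(1, 10))
print(catalan(5))  # 14, the 4th Catalan number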
'''simple docstring'''
from __future__ import annotations
import math
class UpperCamelCase_ :
def __init__( self , A ) -> None:
UpperCAmelCase : Optional[int] = size
# approximate the overall size of segment tree with given value
UpperCAmelCase : Optional[int] = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
UpperCAmelCase : Any = [0 for i in range(0 , 4 * size )]
UpperCAmelCase : Tuple = [0 for i in range(0 , 4 * size )] # flag for lazy update
def _lowercase( self , A ) -> int:
return idx * 2
def _lowercase( self , A ) -> int:
return idx * 2 + 1
def _lowercase( self , A , A , A , A ) -> None:
if left_element == right_element:
UpperCAmelCase : str = a[left_element - 1]
else:
UpperCAmelCase : Tuple = (left_element + right_element) // 2
self.build(self.left(A ) , A , A , A )
self.build(self.right(A ) , mid + 1 , A , A )
UpperCAmelCase : str = max(
self.segment_tree[self.left(A )] , self.segment_tree[self.right(A )] )
def _lowercase( self , A , A , A , A , A , A ) -> bool:
if self.flag[idx] is True:
UpperCAmelCase : Optional[Any] = self.lazy[idx]
UpperCAmelCase : int = False
if left_element != right_element:
UpperCAmelCase : List[str] = self.lazy[idx]
UpperCAmelCase : Optional[Any] = self.lazy[idx]
UpperCAmelCase : List[str] = True
UpperCAmelCase : int = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
UpperCAmelCase : Optional[Any] = val
if left_element != right_element:
UpperCAmelCase : Tuple = val
UpperCAmelCase : int = val
UpperCAmelCase : Any = True
UpperCAmelCase : str = True
return True
UpperCAmelCase : str = (left_element + right_element) // 2
self.update(self.left(A ) , A , A , A , A , A )
self.update(self.right(A ) , mid + 1 , A , A , A , A )
UpperCAmelCase : List[str] = max(
self.segment_tree[self.left(A )] , self.segment_tree[self.right(A )] )
return True
def _lowercase( self , A , A , A , A , A ) -> int | float:
if self.flag[idx] is True:
UpperCAmelCase : Any = self.lazy[idx]
UpperCAmelCase : Any = False
if left_element != right_element:
UpperCAmelCase : Optional[Any] = self.lazy[idx]
UpperCAmelCase : Tuple = self.lazy[idx]
UpperCAmelCase : List[str] = True
UpperCAmelCase : Tuple = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
UpperCAmelCase : Dict = (left_element + right_element) // 2
UpperCAmelCase : List[Any] = self.query(self.left(A ) , A , A , A , A )
UpperCAmelCase : str = self.query(self.right(A ) , mid + 1 , A , A , A )
return max(A , A )
def __str__( self ) -> str:
return str([self.query(1 , 1 , self.size , A , A ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
a : Optional[int] = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8]
a : Optional[Any] = 1_5
a : Union[str, Any] = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 1_1))
print(segt.query(1, 1, size, 7, 1_2))
segt.update(1, 1, size, 1, 3, 1_1_1)
print(segt.query(1, 1, size, 1, 1_5))
segt.update(1, 1, size, 7, 8, 2_3_5)
print(segt)
| 265 | 0 |
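A minimal brute-force check of the lazy segment tree above, assuming the class is named SegmentTree as in its own __main__ demo: a range assignment followed by range-max queries should agree with doing the same on a plain list.

data = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
size = len(data)
tree = SegmentTree(size)
tree.build(1, 1, size, data)
naive = list(data)

tree.update(1, 1, size, 1, 3, 111)  # assign 111 to 1-indexed positions 1..3
naive[0:3] = [111] * 3

assert tree.query(1, 1, size, 1, size) == max(naive) == 111
assert tree.query(1, 1, size, 4, 6) == max(naive[3:6]) == 7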
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __UpperCAmelCase ( unittest.TestCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=7 , lowerCAmelCase_=3 , lowerCAmelCase_=18 , lowerCAmelCase_=30 , lowerCAmelCase_=4_00 , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=True , ):
"""simple docstring"""
_snake_case = size if size is not None else {'height': 18, 'width': 18}
_snake_case = parent
_snake_case = batch_size
_snake_case = num_channels
_snake_case = image_size
_snake_case = min_resolution
_snake_case = max_resolution
_snake_case = do_resize
_snake_case = size
_snake_case = apply_ocr
def lowerCamelCase ( self ):
"""simple docstring"""
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
__lowercase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = LayoutLMvaImageProcessingTester(self )
@property
def lowerCamelCase ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , 'do_resize' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'size' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'apply_ocr' ) )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
_snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , Image.Image )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , lowerCAmelCase_ )
self.assertIsInstance(encoding.boxes , lowerCAmelCase_ )
# Test batched
_snake_case = image_processing(lowerCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , numpify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , np.ndarray )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_snake_case = image_processing(lowerCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , torch.Tensor )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_snake_case = image_processing(lowerCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = LayoutLMvaImageProcessor()
from datasets import load_dataset
_snake_case = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
_snake_case = Image.open(ds[0]['file'] ).convert('RGB' )
_snake_case = image_processing(lowerCAmelCase_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_snake_case = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
_snake_case = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowerCAmelCase_ )
self.assertListEqual(encoding.boxes , lowerCAmelCase_ )
# with apply_OCR = False
_snake_case = LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase_ )
_snake_case = image_processing(lowerCAmelCase_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 42 |
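A hedged usage sketch of the processor exercised above: the obfuscated class name LayoutLMvaImageProcessor corresponds to transformers' LayoutLMv3ImageProcessor (this dump rewrites digits in identifiers), and pytesseract is only needed when apply_ocr=True:

from PIL import Image
from transformers import LayoutLMv3ImageProcessor

processor = LayoutLMv3ImageProcessor(apply_ocr=False)
encoding = processor(Image.new("RGB", (640, 480)), return_tensors="pt")
print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224])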
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        # Shift every channel value by `level` (identity at level == 0)
        return 1_2_8 + level + (c - 1_2_8)

    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 1_0_0)
        bright_img.save("""image_data/lena_brightness.png""", format="""png""")
| 265 | 0 |
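A quick sanity check of change_brightness above with made-up pixel values (Image.point builds a per-channel lookup table from brightness, which reduces to c + level):

from PIL import Image

demo = Image.new("RGB", (1, 1), (10, 100, 150))
print(change_brightness(demo, 50).getpixel((0, 0)))  # (60, 150, 200)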
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> Union[str, Any]:
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
__UpperCamelCase :str = [[1, 2, 4], [1, 2, 3, 4]]
__UpperCamelCase :Dict = DisjunctiveConstraint(__lowercase)
self.assertTrue(isinstance(dc.token_ids , __lowercase))
with self.assertRaises(__lowercase):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
with self.assertRaises(__lowercase):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
def UpperCamelCase__ ( self) -> List[Any]:
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
__UpperCamelCase :Dict = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__lowercase):
DisjunctiveConstraint(__lowercase) # fails here
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :Union[str, Any] = [[1, 2, 3], [1, 2, 4]]
__UpperCamelCase :Any = DisjunctiveConstraint(__lowercase)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Dict = dc.update(1)
__UpperCamelCase :Union[str, Any] = stepped is True and completed is False and reset is False
self.assertTrue(__lowercase)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :List[Any] = dc.update(2)
__UpperCamelCase :Tuple = stepped is True and completed is False and reset is False
self.assertTrue(__lowercase)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Dict = dc.update(3)
__UpperCamelCase :List[Any] = stepped is True and completed is True and reset is False
self.assertTrue(__lowercase)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3])
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :Dict = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__UpperCamelCase :List[Any] = DisjunctiveConstraint(__lowercase)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :int = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :List[str] = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Union[str, Any] = dc.update(4)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2, 4])
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Union[str, Any] = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5])
dc.reset()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Any = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 3)
self.assertTrue(dc.current_seq == [1])
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Tuple = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 2)
self.assertTrue(dc.current_seq == [1, 2])
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :str = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.remaining() == 0)
self.assertTrue(dc.current_seq == [1, 2, 5])
| 43 |
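A compact usage sketch of the state machine exercised above: feed token ids one at a time and watch for completion or reset.

from transformers.generation import DisjunctiveConstraint

dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
for token in (1, 2, 4):
    stepped, completed, reset = dc.update(token)
print(completed, dc.current_seq)  # True [1, 2, 4]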
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=64 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Optional[int]:
UpperCAmelCase : List[Any] = parent
UpperCAmelCase : Optional[int] = batch_size
UpperCAmelCase : Union[str, Any] = seq_length
UpperCAmelCase : Optional[Any] = is_training
UpperCAmelCase : Dict = use_input_mask
UpperCAmelCase : str = use_token_type_ids
UpperCAmelCase : List[Any] = use_labels
UpperCAmelCase : List[Any] = vocab_size
UpperCAmelCase : Dict = hidden_size
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : Optional[int] = num_attention_heads
UpperCAmelCase : int = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : List[str] = hidden_dropout_prob
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : str = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : List[str] = type_sequence_label_size
UpperCAmelCase : int = initializer_range
UpperCAmelCase : str = num_labels
UpperCAmelCase : Optional[int] = num_choices
UpperCAmelCase : Dict = scope
UpperCAmelCase : Union[str, Any] = vocab_size - 1
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Any = None
if self.use_input_mask:
UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : List[str] = None
if self.use_labels:
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Optional[int] = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowercase( self ) -> Optional[Any]:
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase : Any = True
return config, input_ids, input_mask, token_labels
def _lowercase( self , A , A , A ) -> int:
UpperCAmelCase : str = GPTNeoXModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[str] = model(A , attention_mask=A )
UpperCAmelCase : List[str] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A ) -> Optional[int]:
UpperCAmelCase : str = True
UpperCAmelCase : Optional[Any] = GPTNeoXModel(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A ) -> List[str]:
UpperCAmelCase : Tuple = GPTNeoXForCausalLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase : str = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A ) -> Tuple:
UpperCAmelCase : List[str] = self.num_labels
UpperCAmelCase : Any = GPTNeoXForQuestionAnswering(A )
model.to(A )
model.eval()
UpperCAmelCase : str = model(A , attention_mask=A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase( self , A , A , A , A ) -> int:
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : List[str] = GPTNeoXForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase( self , A , A , A , A ) -> str:
UpperCAmelCase : List[Any] = self.num_labels
UpperCAmelCase : Tuple = GPTNeoXForTokenClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase( self , A , A , A ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : str = GPTNeoXForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
UpperCAmelCase : List[str] = model(A , attention_mask=A , use_cache=A )
UpperCAmelCase : List[Any] = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : Any = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase : Dict = model(A , attention_mask=A , output_hidden_states=A )
UpperCAmelCase : Any = output_from_no_past["""hidden_states"""][0]
UpperCAmelCase : List[str] = model(
A , attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0]
# select random slice
UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase : List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
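    # The equality check above is the KV-cache contract: feeding only the new
    # tokens plus `past_key_values` must reproduce the hidden states of a full
    # forward pass. Incremental-decoding sketch (model and inputs assumed to exist):
    #
    #   out = model(input_ids, use_cache=True)
    #   past = out.past_key_values
    #   next_token = out.logits[:, -1:].argmax(-1)
    #   out = model(next_token, past_key_values=past, use_cache=True)  # only 1 new token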
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs
UpperCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : str = GPTNeoXModelTester(self )
UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=A , hidden_size=64 , num_attention_heads=8 )
def _lowercase( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(A , A , A )
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(A , A , A )
def _lowercase( self ) -> Optional[Any]:
# This regression test was failing with PyTorch < 1.3
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCAmelCase : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(A , A , A )
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(A , A , A )
def _lowercase( self ) -> int:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*A )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
def _lowercase( self ) -> Any:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def _lowercase( self ) -> Optional[int]:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowercase( self , A ) -> str:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : int = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Dict = GPTNeoXModel(A )
original_model.to(A )
original_model.eval()
UpperCAmelCase : List[str] = original_model(A ).last_hidden_state
UpperCAmelCase : Any = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Any = {"""type""": scaling_type, """factor""": 1_0.0}
UpperCAmelCase : str = GPTNeoXModel(A )
scaled_model.to(A )
scaled_model.eval()
UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state
UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
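    # `rope_scaling` is plain model-config surface; outside the test it would be
    # set like this (factor value illustrative):
    #
    #   config = GPTNeoXConfig.from_pretrained("EleutherAI/pythia-410m-deduped")
    #   config.rope_scaling = {"type": "linear", "factor": 2.0}  # ~2x longer contexts
    #   model = GPTNeoXModel(config)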
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
@slow
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : str = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
UpperCAmelCase : int = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(A )
UpperCAmelCase : List[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(A )
# The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
UpperCAmelCase : List[str] = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
UpperCAmelCase : Union[str, Any] = model.generate(**A , do_sample=A , max_new_tokens=20 )
UpperCAmelCase : Tuple = tokenizer.batch_decode(A )[0]
self.assertEqual(A , A )
| 265 | 0 |
"""simple docstring"""
class EditDistance:
    """
    Computes the Levenshtein (edit) distance between two words, both with a
    memoized top-down recursion and with a bottom-up DP table.
    """

    def __init__(self):
        self.worda = ""
        self.wordb = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m, n):
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.worda[m] == self.wordb[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, worda: str, wordb: str) -> int:
        self.worda = worda
        self.wordb = wordb
        self.dp = [[-1 for _ in range(len(wordb))] for _ in range(len(worda))]
        return self.__min_dist_top_down_dp(len(worda) - 1, len(wordb) - 1)

    def min_dist_bottom_up(self, worda: str, wordb: str) -> int:
        self.worda = worda
        self.wordb = wordb
        m = len(worda)
        n = len(wordb)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif worda[i - 1] == wordb[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print('****************** Testing Edit Distance DP Algorithm ******************')
    print()

    Sa = input('Enter the first string: ').strip()
    Sb = input('Enter the second string: ').strip()

    print()
    print(F"""The minimum edit distance is: {solver.min_dist_top_down(Sa, Sb)}""")
    print(F"""The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sb)}""")
    print()
    print('*************** End of Testing Edit Distance DP Algorithm ***************')
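# Worked example (separate from the interactive driver above): turning "kitten"
# into "sitting" takes 3 edits (substitute k->s, substitute e->i, insert g),
# so both implementations return 3:
#
#   solver = EditDistance()
#   assert solver.min_dist_top_down("kitten", "sitting") == 3
#   assert solver.min_dist_bottom_up("kitten", "sitting") == 3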
| 44 |
'''simple docstring'''
def actual_power(a: int, b: int) -> int:
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    # int(b / 2) truncates toward zero, so actual_power also works for negative b,
    # returning a ** abs(b); the reciprocal then yields a ** b.
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
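# Note on cost: the recursion above calls itself twice per halving, so it performs
# O(b) multiplications overall. A variant that computes the half-power once brings
# this down to O(log b); this helper is an illustration, not part of the original:
def fast_power(a: int, b: int) -> float:
    if b < 0:
        return 1 / fast_power(a, -b)
    if b == 0:
        return 1
    half = fast_power(a, b // 2)
    return half * half if b % 2 == 0 else a * half * half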
| 265 | 0 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            '`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '
            'instead.',
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
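# Migration sketch (illustrative): since the subclass only emits the deprecation
# warning before delegating, call sites can switch to Trainer unchanged:
#
#   # before: trainer = SageMakerTrainer(args=training_args, model=model)
#   # after:  trainer = Trainer(args=training_args, model=model)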
| 45 |
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a : Any = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = AlbertTokenizer
lowercase = AlbertTokenizerFast
lowercase = True
lowercase = True
lowercase = True
def _lowercase( self ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : Optional[int] = AlbertTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase( self , A ) -> int:
UpperCAmelCase : Optional[int] = """this is a test"""
UpperCAmelCase : Dict = """this is a test"""
return input_text, output_text
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = """<pad>"""
UpperCAmelCase : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def _lowercase( self ) -> Any:
UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(A ) , 30000 )
def _lowercase( self ) -> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _lowercase( self ) -> Union[str, Any]:
if not self.test_rust_tokenizer:
return
UpperCAmelCase : int = self.get_tokenizer()
UpperCAmelCase : List[str] = self.get_rust_tokenizer()
UpperCAmelCase : Optional[Any] = """I was born in 92000, and this is falsé."""
UpperCAmelCase : str = tokenizer.tokenize(A )
UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(A )
self.assertListEqual(A , A )
UpperCAmelCase : Any = tokenizer.encode(A , add_special_tokens=A )
UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase : Union[str, Any] = tokenizer.encode(A )
UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A )
self.assertListEqual(A , A )
def _lowercase( self ) -> Any:
UpperCAmelCase : List[Any] = AlbertTokenizer(A , keep_accents=A )
UpperCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [48, 25, 21, 1289] )
UpperCAmelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(A , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : str = AlbertTokenizer(A )
UpperCAmelCase : Optional[int] = tokenizer.encode("""sequence builders""" )
UpperCAmelCase : Any = tokenizer.encode("""multi-sequence build""" )
UpperCAmelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A )
UpperCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(A , A )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def _lowercase( self ) -> Dict:
# fmt: off
UpperCAmelCase : Tuple = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
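    # SentencePiece round-trip sketch (token ids depend on the vocab, so the
    # values are illustrative):
    #
    #   tok = AlbertTokenizer.from_pretrained("albert-base-v2")
    #   ids = tok.encode("this is a test")         # [CLS] ▁this ▁is ▁a ▁test [SEP]
    #   tok.decode(ids, skip_special_tokens=True)  # -> "this is a test"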
| 265 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE__ = {
"vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
"tokenizer_file": {
"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
},
}
SCREAMING_SNAKE_CASE__ = {"mobilebert-uncased": 512}
SCREAMING_SNAKE_CASE__ = {}
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
_SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE = MobileBertTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ) -> Tuple:
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )
lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowercase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowercase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowercase ) != tokenize_chinese_chars
):
lowerCAmelCase = getattr(lowercase , normalizer_state.pop("""type""" ) )
lowerCAmelCase = do_lower_case
lowerCAmelCase = strip_accents
lowerCAmelCase = tokenize_chinese_chars
lowerCAmelCase = normalizer_class(**lowercase )
lowerCAmelCase = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
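    # Segment-id layout produced by create_token_type_ids_from_sequences for a
    # pair input [CLS] A1 A2 [SEP] B1 B2 B3 [SEP] (token counts illustrative):
    #   [0, 0, 0, 0, 1, 1, 1, 1]
    # zeros cover [CLS] + sequence A + the first [SEP]; ones cover sequence B
    # + the final [SEP].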
| 46 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = StableDiffusionDiffEditPipeline
lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
lowercase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase = frozenset([] )
def _lowercase( self ) -> Optional[int]:
torch.manual_seed(0 )
UpperCAmelCase : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=A , )
UpperCAmelCase : int = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_one=A , )
UpperCAmelCase : List[Any] = DDIMInverseScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_zero=A , )
torch.manual_seed(0 )
UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
UpperCAmelCase : Optional[Any] = CLIPTextModel(A )
UpperCAmelCase : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCAmelCase : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _lowercase( self , A , A=0 ) -> Optional[Any]:
UpperCAmelCase : Any = floats_tensor((1, 16, 16) , rng=random.Random(A ) ).to(A )
UpperCAmelCase : Any = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(A ) ).to(A )
if str(A ).startswith("""mps""" ):
UpperCAmelCase : List[Any] = torch.manual_seed(A )
else:
UpperCAmelCase : List[Any] = torch.Generator(device=A ).manual_seed(A )
UpperCAmelCase : int = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _lowercase( self , A , A=0 ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A )
UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" )
if str(A ).startswith("""mps""" ):
UpperCAmelCase : Optional[int] = torch.manual_seed(A )
else:
UpperCAmelCase : Optional[Any] = torch.Generator(device=A ).manual_seed(A )
UpperCAmelCase : Any = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _lowercase( self , A , A=0 ) -> str:
UpperCAmelCase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A )
UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" )
if str(A ).startswith("""mps""" ):
UpperCAmelCase : Optional[int] = torch.manual_seed(A )
else:
UpperCAmelCase : Optional[int] = torch.Generator(device=A ).manual_seed(A )
UpperCAmelCase : str = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def _lowercase( self ) -> List[Any]:
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
UpperCAmelCase : Dict = self.get_dummy_components()
UpperCAmelCase : int = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(A , A , A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
UpperCAmelCase : Any = self.get_dummy_inputs(A )
UpperCAmelCase : Optional[Any] = pipe(**A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(A )
UpperCAmelCase : Dict = self.pipeline_class.from_pretrained(A )
pipe_loaded.to(A )
pipe_loaded.set_progress_bar_config(disable=A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(A , A ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(A )
UpperCAmelCase : Tuple = pipe_loaded(**A )[0]
UpperCAmelCase : Optional[Any] = np.abs(output - output_loaded ).max()
self.assertLess(A , 1e-4 )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Optional[int] = """cpu"""
UpperCAmelCase : Optional[Any] = self.get_dummy_components()
UpperCAmelCase : List[str] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Union[str, Any] = self.get_dummy_mask_inputs(A )
UpperCAmelCase : List[Any] = pipe.generate_mask(**A )
UpperCAmelCase : Dict = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
UpperCAmelCase : Optional[int] = np.array([0] * 9 )
UpperCAmelCase : Union[str, Any] = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = """cpu"""
UpperCAmelCase : List[str] = self.get_dummy_components()
UpperCAmelCase : Optional[Any] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Optional[int] = self.get_dummy_inversion_inputs(A )
UpperCAmelCase : List[str] = pipe.invert(**A ).images
UpperCAmelCase : Union[str, Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase : Dict = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , )
UpperCAmelCase : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1e-3 )
def _lowercase( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def _lowercase( self ) -> int:
UpperCAmelCase : List[Any] = """cpu"""
UpperCAmelCase : int = self.get_dummy_components()
UpperCAmelCase : List[Any] = {"""beta_start""": 0.0_0_0_8_5, """beta_end""": 0.0_1_2, """beta_schedule""": """scaled_linear"""}
UpperCAmelCase : int = DPMSolverMultistepScheduler(**A )
UpperCAmelCase : int = DPMSolverMultistepInverseScheduler(**A )
UpperCAmelCase : List[str] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Tuple = self.get_dummy_inversion_inputs(A )
UpperCAmelCase : Any = pipe.invert(**A ).images
UpperCAmelCase : Dict = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase : Any = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , )
UpperCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1e-3 )
@require_torch_gpu
@slow
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def _lowercase( cls ) -> Dict:
UpperCAmelCase : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
UpperCAmelCase : Union[str, Any] = raw_image.convert("""RGB""" ).resize((768, 768) )
UpperCAmelCase : List[str] = raw_image
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Dict = torch.manual_seed(0 )
UpperCAmelCase : int = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa )
UpperCAmelCase : Optional[int] = DDIMScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Tuple = """a bowl of fruit"""
UpperCAmelCase : List[Any] = """a bowl of pears"""
UpperCAmelCase : str = pipe.generate_mask(
image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , )
UpperCAmelCase : Tuple = pipe.invert(
prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A ).latents
UpperCAmelCase : Any = pipe(
prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
UpperCAmelCase : List[str] = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase : Union[str, Any] = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa )
UpperCAmelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase : Any = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : int = """a bowl of fruit"""
UpperCAmelCase : int = """a bowl of pears"""
UpperCAmelCase : str = pipe.generate_mask(
image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , )
UpperCAmelCase : Any = pipe.invert(
prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A , num_inference_steps=25 , ).latents
UpperCAmelCase : str = pipe(
prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
UpperCAmelCase : Tuple = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
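    # The slow tests above walk DiffEdit's three-stage API end to end; condensed
    # (prompts and strengths taken from the tests, image loading elided):
    #
    #   mask = pipe.generate_mask(image=img, source_prompt="a bowl of fruit",
    #                             target_prompt="a bowl of pears")                # 1) where to edit
    #   latents = pipe.invert(prompt="a bowl of fruit", image=img,
    #                         inpaint_strength=0.7, generator=generator).latents  # 2) DDIM inversion
    #   edited = pipe(prompt="a bowl of pears", mask_image=mask, image_latents=latents,
    #                 generator=generator, inpaint_strength=0.7).images[0]        # 3) masked denoise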
| 265 | 0 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
def __init__( self : Union[str, Any] , _a : Union[str, "sqlalchemy.sql.Selectable"] , _a : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , _a : Optional[Features] = None , _a : str = None , _a : bool = False , **_a : Union[str, Any] , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(features=_a , cache_dir=_a , keep_in_memory=_a , **_a )
_SCREAMING_SNAKE_CASE =Sql(
cache_dir=_a , features=_a , sql=_a , con=_a , **_a , )
def A ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
self.builder.download_and_prepare(
download_config=_a , download_mode=_a , verification_mode=_a , base_path=_a , )
# Build dataset for splits
_SCREAMING_SNAKE_CASE =self.builder.as_dataset(
split='train' , verification_mode=_a , in_memory=self.keep_in_memory )
return dataset
class SqlDatasetWriter:
def __init__( self : List[Any] , _a : Dataset , _a : str , _a : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , _a : Optional[int] = None , _a : Optional[int] = None , **_a : int , ) -> str:
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f"num_proc {num_proc} must be an integer > 0." )
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =name
_SCREAMING_SNAKE_CASE =con
_SCREAMING_SNAKE_CASE =batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_SCREAMING_SNAKE_CASE =num_proc
_SCREAMING_SNAKE_CASE =to_sql_kwargs
def A ( self : Dict ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.to_sql_kwargs.pop('sql' , _a )
_SCREAMING_SNAKE_CASE =self.to_sql_kwargs.pop('con' , _a )
_SCREAMING_SNAKE_CASE =self.to_sql_kwargs.pop('index' , _a )
_SCREAMING_SNAKE_CASE =self._write(index=_a , **self.to_sql_kwargs )
return written
def A ( self : Dict , _a : List[str] ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =args
_SCREAMING_SNAKE_CASE ={**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
_SCREAMING_SNAKE_CASE =query_table(
table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , )
_SCREAMING_SNAKE_CASE =batch.to_pandas()
_SCREAMING_SNAKE_CASE =df.to_sql(self.name , self.con , index=_a , **_a )
return num_rows or len(_a )
def A ( self : Any , _a : Union[str, Any] , **_a : List[str] ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating SQL from Arrow format' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating SQL from Arrow format' , ):
written += num_rows
return written
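# These classes back `Dataset.from_sql` / `Dataset.to_sql`; a minimal round trip
# (table name and URI illustrative):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"text": ["a", "b"]})
#   ds.to_sql("my_table", "sqlite:///data.db")                # SqlDatasetWriter
#   ds2 = Dataset.from_sql("my_table", "sqlite:///data.db")   # SqlDatasetReader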
| 47 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def __lowerCamelCase ( _lowercase ) -> Union[str, Any]:
UpperCAmelCase : Dict = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(_lowercase , _lowercase )
def __lowerCamelCase ( _lowercase ) -> Tuple:
UpperCAmelCase , UpperCAmelCase : List[str] = emb.weight.shape
UpperCAmelCase : Dict = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
UpperCAmelCase : Optional[Any] = emb.weight.data
return lin_layer
def __lowerCamelCase ( _lowercase , _lowercase=None ) -> List[Any]:
UpperCAmelCase : Dict = {}
for old_key in state_dict.keys():
UpperCAmelCase : str = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
UpperCAmelCase : int = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' )
else:
UpperCAmelCase : str = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" )
if "gate" in key:
UpperCAmelCase : str = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" )
if "fc2" and "experts" not in key:
UpperCAmelCase : Tuple = key.replace(""".fc2.""" , """.ffn.fc2.""" )
if "fc1" and "experts" not in key:
UpperCAmelCase : List[Any] = key.replace(""".fc1.""" , """.ffn.fc1.""" )
if ".encoder_attn." in key:
UpperCAmelCase : Union[str, Any] = key.replace(""".encoder_attn.""" , """.cross_attention.""" )
if "encoder_attn_layer_norm" in key:
UpperCAmelCase : str = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" )
if "final_layer_norm" in key:
UpperCAmelCase : Optional[int] = key.replace("""final_layer_norm""" , """ff_layer_norm""" )
UpperCAmelCase : str = state_dict[old_key]
return new_dict
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = WEIGHTS_NAME ) -> Tuple:
UpperCAmelCase : Any = []
UpperCAmelCase : Dict = 0
os.makedirs(_lowercase , exist_ok=_lowercase )
for expert in range(_lowercase ):
UpperCAmelCase : str = switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(_lowercase ):
UpperCAmelCase : Optional[Any] = torch.load(_lowercase )["""model"""]
remove_ignore_keys_(_lowercase )
UpperCAmelCase : List[str] = rename_fairseq_keys(_lowercase , _lowercase )
UpperCAmelCase : Optional[Any] = os.path.join(
_lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase )+1:05d}-of-???.bin''' ) )
torch.save(_lowercase , _lowercase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_lowercase )[0]].dtype )
# Add the last block
UpperCAmelCase : Optional[Any] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase )+1:05d}-of-???.bin''' ) )
UpperCAmelCase : Optional[int] = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
remove_ignore_keys_(_lowercase )
UpperCAmelCase : Optional[Any] = rename_fairseq_keys(_lowercase , _lowercase )
UpperCAmelCase : int = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved in the same file)
if len(_lowercase ) == 1:
UpperCAmelCase : Dict = os.path.join(_lowercase , _lowercase )
torch.save(_lowercase , _lowercase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_lowercase , _lowercase )
# Otherwise, let's build the index
UpperCAmelCase : Optional[int] = {}
for idx, shard in enumerate(_lowercase ):
UpperCAmelCase : List[str] = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_lowercase ):05d}.bin''' )
UpperCAmelCase : List[str] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(_lowercase , os.path.join(_lowercase , _lowercase ) )
for key in shard:
UpperCAmelCase : Tuple = shard_file
# Add the metadata
UpperCAmelCase : Any = {"""total_size""": total_size}
UpperCAmelCase : Optional[Any] = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(_lowercase , _lowercase ) , """w""" , encoding="""utf-8""" ) as f:
UpperCAmelCase : str = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + """\n"""
f.write(_lowercase )
return metadata, index
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
a : int = parser.parse_args()
a , a : Any = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_2_8,
args.dtype,
)
a : str = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8
)
config.save_pretrained(args.pytorch_dump_folder_path)
a : Optional[int] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
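# The index written above follows the standard sharded-checkpoint layout
# (entry names and sizes illustrative):
#
#   {
#     "metadata": {"total_size": 123456789},
#     "weight_map": {
#       "shared.weight": "pytorch_model-00001-of-00072.bin",
#       "decoder.layers.3.ffn.experts.expert_0.fc1.weight": "pytorch_model-00002-of-00072.bin"
#     }
#   }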
| 265 | 0 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
lowerCamelCase : str = jnp.ones((batch_size, length) ) / length
return scores
def _lowercase ( self ) -> Tuple:
lowerCamelCase : Optional[int] = None
lowerCamelCase : Optional[int] = 20
lowerCamelCase : Dict = self._get_uniform_logits(batch_size=2 , length=UpperCamelCase__ )
# tweak scores to not be uniform anymore
lowerCamelCase : Union[str, Any] = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase : int = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase : Any = jax.nn.softmax(UpperCamelCase__ , axis=-1 )
lowerCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase : List[Any] = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase : Dict = jax.nn.softmax(temp_dist_warper_sharper(UpperCamelCase__ , scores.copy() , cur_len=UpperCamelCase__ ) , axis=-1 )
lowerCamelCase : Any = jax.nn.softmax(temp_dist_warper_smoother(UpperCamelCase__ , scores.copy() , cur_len=UpperCamelCase__ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _lowercase ( self ) -> Any:
lowerCamelCase : Dict = None
lowerCamelCase : Tuple = 10
lowerCamelCase : Dict = 2
# create ramp distribution
lowerCamelCase : str = np.broadcast_to(np.arange(UpperCamelCase__ )[None, :] , (batch_size, vocab_size) ).copy()
lowerCamelCase : str = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase : Dict = FlaxTopKLogitsWarper(3 )
lowerCamelCase : str = top_k_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase : int = 5
lowerCamelCase : List[Any] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCamelCase : int = np.broadcast_to(np.arange(UpperCamelCase__ )[None, :] , (batch_size, length) ).copy()
lowerCamelCase : List[str] = top_k_warp_safety_check(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _lowercase ( self ) -> List[Any]:
lowerCamelCase : Union[str, Any] = None
lowerCamelCase : int = 10
lowerCamelCase : Optional[Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase : Optional[Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowerCamelCase : Any = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase : Optional[int] = np.exp(top_p_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase : str = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase : Optional[int] = np.broadcast_to(np.arange(UpperCamelCase__ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase : int = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowerCamelCase : Union[str, Any] = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCamelCase : Optional[int] = top_p_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : List[str] = 20
lowerCamelCase : List[str] = 4
lowerCamelCase : List[str] = 0
lowerCamelCase : Dict = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCamelCase__ )
# check that min length is applied at length 5
lowerCamelCase : Dict = ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCamelCase : Tuple = 5
lowerCamelCase : Optional[Any] = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Optional[int] = min_dist_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] )
# check that min length is not applied anymore at length 15
lowerCamelCase : Optional[Any] = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : str = 15
lowerCamelCase : List[Any] = min_dist_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertFalse(jnp.isinf(UpperCamelCase__ ).any() )
def _lowercase ( self ) -> str:
lowerCamelCase : List[str] = 20
lowerCamelCase : List[Any] = 4
lowerCamelCase : str = 0
lowerCamelCase : List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase__ )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase : Any = ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCamelCase : str = 1
lowerCamelCase : Tuple = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : int = logits_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase : List[str] = 3
lowerCamelCase : Optional[Any] = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Optional[Any] = logits_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertFalse(jnp.isinf(UpperCamelCase__ ).any() )
def _lowercase ( self ) -> List[str]:
lowerCamelCase : Dict = 20
lowerCamelCase : Optional[int] = 4
lowerCamelCase : int = 0
lowerCamelCase : Optional[int] = 5
lowerCamelCase : int = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase : Tuple = ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCamelCase : Optional[int] = 4
lowerCamelCase : int = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Tuple = logits_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase : int = 3
lowerCamelCase : List[str] = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Dict = logits_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertFalse(jnp.isinf(UpperCamelCase__ ).any() )
def _lowercase ( self ) -> Any:
lowerCamelCase : List[str] = 4
lowerCamelCase : Union[str, Any] = 10
lowerCamelCase : Dict = 15
lowerCamelCase : int = 2
lowerCamelCase : List[str] = 1
lowerCamelCase : List[str] = 15
# dummy input_ids and scores
lowerCamelCase : Dict = ids_tensor((batch_size, sequence_length) , UpperCamelCase__ )
lowerCamelCase : Tuple = input_ids.copy()
lowerCamelCase : Any = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : str = scores.copy()
# instantiate all dist processors
lowerCamelCase : Dict = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase : Dict = FlaxTopKLogitsWarper(3 )
lowerCamelCase : Tuple = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase : List[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCamelCase__ )
lowerCamelCase : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = 10
# no processor list
lowerCamelCase : Any = temp_dist_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : Tuple = top_k_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : Dict = top_p_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : List[str] = min_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : Optional[int] = bos_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : Dict = eos_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# with processor list
lowerCamelCase : List[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase : int = processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : List[str] = 4
lowerCamelCase : int = 10
lowerCamelCase : List[str] = 15
lowerCamelCase : Optional[Any] = 2
lowerCamelCase : Optional[int] = 1
lowerCamelCase : Any = 15
# dummy input_ids and scores
lowerCamelCase : Dict = ids_tensor((batch_size, sequence_length) , UpperCamelCase__ )
lowerCamelCase : Optional[Any] = input_ids.copy()
lowerCamelCase : List[Any] = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : str = scores.copy()
# instantiate all dist processors
lowerCamelCase : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase : Tuple = FlaxTopKLogitsWarper(3 )
lowerCamelCase : str = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase : List[str] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCamelCase__ )
lowerCamelCase : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase__ )
lowerCamelCase : Optional[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
lowerCamelCase : Optional[Any] = 10
# no processor list
def run_no_processor_list(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : Tuple = temp_dist_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = top_k_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : Optional[Any] = top_p_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : Dict = min_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : List[str] = bos_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : Tuple = eos_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
return scores
# with processor list
def run_processor_list(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : Optional[int] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase : Optional[Any] = processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
return scores
lowerCamelCase : List[Any] = jax.jit(UpperCamelCase__ )
lowerCamelCase : Optional[int] = jax.jit(UpperCamelCase__ )
lowerCamelCase : Optional[Any] = jitted_run_no_processor_list(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Dict = jitted_run_processor_list(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
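# A minimal standalone sketch (not part of the test class above) of chaining the
# same Flax processors by hand; requires a flax install, and the shapes and
# hyperparameters here are illustrative assumptions.
import jax.numpy as jnp
from transformers import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
)

dummy_input_ids = jnp.ones((2, 4), dtype="i4")  # batch of 2 partial sequences
dummy_scores = jnp.zeros((2, 20))               # uniform logits over a 20-token vocab
chain = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(temperature=0.5), FlaxTopKLogitsWarper(top_k=3)]
)
warped = chain(dummy_input_ids, dummy_scores, cur_len=4)  # same signature the tests use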
| 48 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : List[Any] = logging.get_logger(__name__)
a : Union[str, Any] = torch.device("""cpu""")
def __lowerCamelCase ( ) -> Any:
UpperCAmelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase : Dict = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
def __lowerCamelCase ( _lowercase ) -> Dict:
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02] )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> str:
UpperCAmelCase : Union[str, Any] = dct.pop(_lowercase )
UpperCAmelCase : str = val
def __lowerCamelCase ( _lowercase ) -> List[str]:
UpperCAmelCase : Tuple = []
for k in state_dict.keys():
UpperCAmelCase : Dict = k
if ".pwconv" in k:
UpperCAmelCase : Union[str, Any] = k_new.replace(""".pwconv""" , """.point_wise_conv""" )
if ".dwconv" in k:
UpperCAmelCase : Dict = k_new.replace(""".dwconv""" , """.depth_wise_conv""" )
if ".Proj." in k:
UpperCAmelCase : str = k_new.replace(""".Proj.""" , """.proj.""" )
if "patch_embed" in k_new:
UpperCAmelCase : Dict = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" )
if "network" in k_new:
UpperCAmelCase : Optional[Any] = k_new.split(""".""" )
if ls[2].isdigit():
UpperCAmelCase : Any = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] )
else:
UpperCAmelCase : Any = k_new.replace("""network""" , """swiftformer.encoder.network""" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
UpperCAmelCase : List[Any] = 1_0_0_0
UpperCAmelCase : List[str] = """huggingface/label-files"""
UpperCAmelCase : Tuple = """imagenet-1k-id2label.json"""
UpperCAmelCase : Dict = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase : Tuple = {int(_lowercase ): v for k, v in idalabel.items()}
UpperCAmelCase : Tuple = idalabel
UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
UpperCAmelCase : List[Any] = [3, 3, 6, 4]
UpperCAmelCase : int = [4_8, 5_6, 1_1_2, 2_2_0]
elif swiftformer_name == "swiftformer_s":
UpperCAmelCase : str = [3, 3, 9, 6]
UpperCAmelCase : str = [4_8, 6_4, 1_6_8, 2_2_4]
elif swiftformer_name == "swiftformer_l1":
UpperCAmelCase : List[Any] = [4, 3, 1_0, 5]
UpperCAmelCase : Union[str, Any] = [4_8, 9_6, 1_9_2, 3_8_4]
elif swiftformer_name == "swiftformer_l3":
UpperCAmelCase : Any = [4, 4, 1_2, 6]
UpperCAmelCase : List[Any] = [6_4, 1_2_8, 3_2_0, 5_1_2]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("""https""" ):
UpperCAmelCase : Dict = torch.hub.load_state_dict_from_url(_lowercase , map_location="""cpu""" , check_hash=_lowercase )
else:
UpperCAmelCase : Dict = torch.load(_lowercase , map_location="""cpu""" )
UpperCAmelCase : str = checkpoint
UpperCAmelCase : Tuple = create_rename_keys(_lowercase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(_lowercase , _lowercase , _lowercase )
# load HuggingFace model
UpperCAmelCase : str = SwiftFormerForImageClassification(_lowercase ).eval()
hf_model.load_state_dict(_lowercase )
# prepare test inputs
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : List[Any] = ViTImageProcessor.from_pretrained("""preprocessor_config""" )
UpperCAmelCase : List[str] = processor(images=_lowercase , return_tensors="""pt""" )
# compare outputs from both models
UpperCAmelCase : List[str] = get_expected_output(_lowercase )
UpperCAmelCase : Dict = hf_model(inputs["""pixel_values"""] ).logits
assert hf_logits.shape == torch.Size([1, 1_0_0_0] )
assert torch.allclose(hf_logits[0, 0:5] , _lowercase , atol=1e-3 )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' )
hf_model.save_pretrained(_lowercase )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
a : Union[str, Any] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
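# A toy illustration of the rename pattern implemented above: `create_rename_keys`
# builds (old, new) pairs and `rename_key` pops each tensor back in under its new
# name. The keys below are made up for the example, not a real SwiftFormer checkpoint.
toy_state_dict = {"network.0.0.pwconv.weight": "tensor-a", "patch_embed.0.weight": "tensor-b"}
toy_renames = [
    ("network.0.0.pwconv.weight", "swiftformer.encoder.network.0.blocks.0.point_wise_conv.weight"),
]
for src, dest in toy_renames:
    toy_state_dict[dest] = toy_state_dict.pop(src)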
| 265 | 0 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class _A :
def __init__( self : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int]=13 , __SCREAMING_SNAKE_CASE : Union[str, Any]=7 , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Optional[int]=99 , __SCREAMING_SNAKE_CASE : List[str]=32 , __SCREAMING_SNAKE_CASE : Dict=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=4 , __SCREAMING_SNAKE_CASE : Any=37 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : Dict=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : int=0.02 , __SCREAMING_SNAKE_CASE : Dict=False , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : str="None" , __SCREAMING_SNAKE_CASE : str=3 , __SCREAMING_SNAKE_CASE : Optional[int]=4 , __SCREAMING_SNAKE_CASE : List[str]=None , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = relative_attention
__a = position_biased_input
__a = pos_att_type
__a = scope
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length])
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__a = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=__SCREAMING_SNAKE_CASE , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
__a = TFDebertaVaModel(config=__SCREAMING_SNAKE_CASE)
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__a = [input_ids, input_mask]
__a = model(__SCREAMING_SNAKE_CASE)
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict):
'''simple docstring'''
__a = TFDebertaVaForMaskedLM(config=__SCREAMING_SNAKE_CASE)
__a = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
__a = self.num_labels
__a = TFDebertaVaForSequenceClassification(config=__SCREAMING_SNAKE_CASE)
__a = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
__a = self.num_labels
__a = TFDebertaVaForTokenClassification(config=__SCREAMING_SNAKE_CASE)
__a = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
__a = TFDebertaVaForQuestionAnswering(config=__SCREAMING_SNAKE_CASE)
__a = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class _A ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCamelCase__ : Dict = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase__ : Optional[int] = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase__ : int = False
UpperCamelCase__ : Optional[int] = False
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = TFDebertaVaModelTester(self)
__a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE)
@slow
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''')
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
@require_tf
class _A ( unittest.TestCase ):
@unittest.skip(reason='''Model not available yet''')
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
pass
@slow
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''')
__a = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
__a = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
__a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE)[0]
__a = tf.constant(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]])
tf.debugging.assert_near(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1E-4)
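# A hedged standalone sketch of the pattern the tester above exercises (tiny
# config -> model -> forward pass). Note that "Va" in the class names in this
# file is a mangled "V2"; the actual transformers exports are DebertaV2Config
# and TFDebertaV2Model. Requires TensorFlow and the model code only, no weights.
import tensorflow as tf
from transformers import DebertaV2Config, TFDebertaV2Model

tiny_config = DebertaV2Config(
    vocab_size=99, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=37,
)
tiny_model = TFDebertaV2Model(tiny_config)
demo_ids = tf.constant([[1, 2, 3, 4]])
demo_out = tiny_model(demo_ids)  # last_hidden_state has shape (1, 4, 32)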
| 49 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __lowerCamelCase ( ) -> Any:
raise RuntimeError("""CUDA out of memory.""" )
class ModelForTest(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(A ):
nonlocal batch_sizes
batch_sizes.append(A )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(A , [128, 64, 32, 16, 8] )
def _lowercase( self ) -> Any:
UpperCAmelCase : Optional[Any] = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(A , A ):
nonlocal batch_sizes
batch_sizes.append(A )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
UpperCAmelCase , UpperCAmelCase : Optional[int] = mock_training_loop_function("""hello""" )
self.assertListEqual(A , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, """hello"""] )
def _lowercase( self ) -> Any:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(A ):
pass
with self.assertRaises(A ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
def _lowercase( self ) -> Optional[int]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(A ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(A ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
def _lowercase( self ) -> Optional[Any]:
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(A , A , A ):
if batch_size != 8:
raise_fake_out_of_memory()
with self.assertRaises(A ) as cm:
mock_training_loop_function(128 , """hello""" , """world""" )
self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0] )
self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0] )
def _lowercase( self ) -> int:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(A ):
raise ValueError("""Oops, we had an error!""" )
with self.assertRaises(A ) as cm:
mock_training_loop_function()
self.assertIn("""Oops, we had an error!""" , cm.exception.args[0] )
@require_cuda
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = torch.cuda.memory_allocated()
UpperCAmelCase : List[str] = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , A )
UpperCAmelCase : Tuple = release_memory(A )
self.assertEqual(torch.cuda.memory_allocated() , A )
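# A usage sketch of `find_executable_batch_size` outside the tests: the decorator
# injects `batch_size` as the first argument and retries with half the batch size
# whenever the wrapped function raises an out-of-memory error.
# `run_epoch` below is a hypothetical stand-in for a real training step.
from accelerate.utils.memory import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=64)
def training_loop(batch_size):
    print(f"attempting batch_size={batch_size}")
    # run_epoch(batch_size)  # hypothetical; raising RuntimeError("CUDA out of memory.") triggers a retry

training_loop()  # called without arguments; the decorator supplies batch_size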
| 265 | 0 |
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consists of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)
    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test2()
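# A numerical sanity check added for illustration: by the Sherman-Morrison
# identity, (A + u v^T)^(-1) * (A + u v^T) should be the identity matrix.
# Here A = I, so its inverse is also I.
def verify_sherman_morrison() -> None:
    a = Matrix(3, 3, 0)
    for i in range(3):
        a[i, i] = 1
    u = Matrix(3, 1, 0)
    v = Matrix(3, 1, 0)
    u[0, 0], v[0, 0] = 2, 3
    inverse = a.sherman_morrison(u, v)   # (A + uv^T)^(-1)
    perturbed = a + (u * v.transpose())  # A + uv^T
    print(inverse * perturbed)           # expect the 3x3 identity (as floats)
# verify_sherman_morrison()  # uncomment to run the check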
| 50 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : Optional[int] = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Tuple = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
a : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
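# A minimal sketch of the idea behind `_LazyModule`: resolve exported names to
# real imports only on first attribute access, via a PEP 562 module-level
# __getattr__. Illustrative only -- transformers' actual implementation replaces
# the module object in sys.modules, as the `else` branch above does.
import importlib

_lazy_names = {"math": ["sqrt"]}

def __getattr__(name):
    for module_name, exported in _lazy_names.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")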
| 265 | 0 |
def print_pascal_triangle(num_rows: int) -> None:
    """simple docstring"""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """simple docstring"""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    triangle = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """simple docstring"""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """simple docstring"""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """simple docstring"""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    result = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """simple docstring"""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
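# For comparison with the two generators above: row n of Pascal's triangle is
# just the binomial coefficients C(n, 0) .. C(n, n), and math.comb computes
# them exactly.
import math

def pascal_row(n: int) -> list[int]:
    return [math.comb(n, k) for k in range(n + 1)]

assert pascal_row(4) == [1, 4, 6, 4, 1] == generate_pascal_triangle(5)[-1]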
| 51 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
lowercase = LongformerTokenizer
lowercase = True
lowercase = LongformerTokenizerFast
lowercase = True
def _lowercase( self ) -> List[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase : List[str] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
UpperCAmelCase : int = dict(zip(A , range(len(A ) ) ) )
UpperCAmelCase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(A ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(A ) )
def _lowercase( self , **A ) -> Optional[Any]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , **A ) -> int:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = """lower newer"""
UpperCAmelCase : Optional[int] = """lower newer"""
return input_text, output_text
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Tuple = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase : Dict = """lower newer"""
UpperCAmelCase : int = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
UpperCAmelCase : Tuple = tokenizer.tokenize(A ) # , add_prefix_space=True)
self.assertListEqual(A , A )
UpperCAmelCase : Any = tokens + [tokenizer.unk_token]
UpperCAmelCase : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : str = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=A ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=A ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
UpperCAmelCase : List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=A )
UpperCAmelCase : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A )
UpperCAmelCase : List[str] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=A , add_prefix_space=A )
UpperCAmelCase : List[str] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=A , add_prefix_space=A )
UpperCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A )
UpperCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(A , A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : str = self.get_tokenizer()
UpperCAmelCase : List[Any] = """Encode this sequence."""
UpperCAmelCase : List[str] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
UpperCAmelCase : Union[str, Any] = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A )
UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A , A )
UpperCAmelCase : Tuple = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A )
UpperCAmelCase : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A , A )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
UpperCAmelCase : int = tokenizer.encode(A , add_special_tokens=A )
UpperCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A , A )
# Testing spaces after special tokens
UpperCAmelCase : Union[str, Any] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(A , lstrip=A , rstrip=A )} ) # mask token has a left space
UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A )
UpperCAmelCase : Union[str, Any] = """Encode <mask> sequence"""
UpperCAmelCase : Union[str, Any] = """Encode <mask>sequence"""
UpperCAmelCase : Union[str, Any] = tokenizer.encode(A )
UpperCAmelCase : Union[str, Any] = encoded.index(A )
UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A , A )
UpperCAmelCase : Tuple = tokenizer.encode(A )
UpperCAmelCase : Optional[int] = encoded.index(A )
UpperCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A , A )
def _lowercase( self ) -> Optional[int]:
pass
def _lowercase( self ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : int = self.tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : Dict = """A, <mask> AllenNLP sentence."""
UpperCAmelCase : Dict = tokenizer_r.encode_plus(A , add_special_tokens=A , return_token_type_ids=A )
UpperCAmelCase : Tuple = tokenizer_p.encode_plus(A , add_special_tokens=A , return_token_type_ids=A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
UpperCAmelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
UpperCAmelCase : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def _lowercase( self ) -> List[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , A )
self.assertEqual(post_processor_state["""add_prefix_space"""] , A )
self.assertEqual(post_processor_state["""trim_offsets"""] , A )
def _lowercase( self ) -> Optional[Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCAmelCase : int = f'''{text_of_1_token} {text_of_1_token}'''
UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , )
UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : Dict = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , )
UpperCAmelCase : int = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : List[Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , )
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , )
UpperCAmelCase : Optional[Any] = f''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )) , )
UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : Union[str, Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : Optional[int] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
| 265 | 0 |
def selection_sort(collection: list) -> list:
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
__lowerCamelCase : str = input("""Enter numbers separated by a comma:\n""").strip()
__lowerCamelCase : List[str] = [int(item) for item in user_input.split(""",""")]
print(selection_sort(unsorted))
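# A tiny check of the in-place O(n^2) sort above (runs on import; illustrative):
assert selection_sort([64, 25, 12, 22, 11]) == [11, 12, 22, 25, 64]
assert selection_sort([]) == []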
| 52 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
a : Tuple = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class UpperCamelCase_ ( unittest.TestCase ):
lowercase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowercase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowercase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowercase = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def _lowercase( self , A , A , A ) -> Dict:
UpperCAmelCase : Union[str, Any] = ZeroShotClassificationPipeline(
model=A , tokenizer=A , candidate_labels=["""polics""", """health"""] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def _lowercase( self , A , A ) -> Optional[int]:
UpperCAmelCase : Dict = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" )
self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} )
# No kwarg
UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , ["""politics"""] )
self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} )
UpperCAmelCase : str = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] )
self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} )
UpperCAmelCase : List[Any] = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" )
self.assertEqual(
A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] )
self.assertEqual(
A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
UpperCAmelCase : Dict = classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" )
self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} )
# https://github.com/huggingface/transformers/issues/13846
UpperCAmelCase : str = classifier(["""I am happy"""] , ["""positive""", """negative"""] )
self.assertEqual(
A , [
{"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]}
for i in range(1 )
] , )
UpperCAmelCase : List[str] = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] )
self.assertEqual(
A , [
{"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]}
for i in range(2 )
] , )
with self.assertRaises(A ):
classifier("""""" , candidate_labels="""politics""" )
with self.assertRaises(A ):
classifier(A , candidate_labels="""politics""" )
with self.assertRaises(A ):
classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" )
with self.assertRaises(A ):
classifier("""Who are you voting for in 2020?""" , candidate_labels=A )
with self.assertRaises(A ):
classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , )
with self.assertRaises(A ):
classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=A , )
self.run_entailment_id(A )
def _lowercase( self , A ) -> Any:
UpperCAmelCase : Tuple = zero_shot_classifier.model.config
UpperCAmelCase : Union[str, Any] = config.labelaid
UpperCAmelCase : Tuple = zero_shot_classifier.entailment_id
UpperCAmelCase : Any = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
UpperCAmelCase : Optional[Any] = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
UpperCAmelCase : Any = {"""ENTAIL""": 0, """NON-ENTAIL""": 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
UpperCAmelCase : List[str] = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
UpperCAmelCase : Tuple = original_labelaid
self.assertEqual(A , zero_shot_classifier.entailment_id )
@require_torch
def _lowercase( self ) -> str:
UpperCAmelCase : int = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"""Who are you voting for in 2020?""" * 100 , candidate_labels=["""politics""", """public health""", """science"""] )
@require_torch
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
UpperCAmelCase : Union[str, Any] = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , )
UpperCAmelCase : List[Any] = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Optional[int] = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" )
UpperCAmelCase : Optional[int] = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
UpperCAmelCase : str = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def _lowercase( self ) -> List[str]:
UpperCAmelCase : int = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" )
UpperCAmelCase : Tuple = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
UpperCAmelCase : Any = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
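# A hedged usage sketch of the pipeline exercised above (downloads the model;
# the printed label is what the slow test asserts for this prompt):
from transformers import pipeline

demo_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
demo_result = demo_classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
)
print(demo_result["labels"][0])  # "politics" per the slow test above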
| 265 | 0 |
'''simple docstring'''
def sylvester(number: int) -> int:
    """simple docstring"""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(f'The 8th number in Sylvester\'s sequence: {sylvester(8)}')
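# The recursion above implements s(1) = 2 and s(n) = s(n-1)^2 - s(n-1) + 1,
# since lower * upper + 1 = (s-1) * s + 1. An equivalent iterative version
# (added for illustration) avoids recursion-depth limits for large n:
def sylvester_iterative(number: int) -> int:
    term = 2
    for _ in range(number - 1):
        term = term * term - term + 1
    return term

assert sylvester_iterative(8) == sylvester(8)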
| 53 |
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"

CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token():
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
    hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
    hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_img_data_
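
# A hedged usage sketch (not part of the original fixtures): a test can lease a
# throwaway dataset repo through `temporary_repo`; the repo name is illustrative.
def _example_temporary_repo_usage(temporary_repo, hf_api, hf_token):
    with temporary_repo(f"{CI_HUB_USER}/scratch-repo-example") as repo_id:
        hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
        # ... exercise the repo here; it is deleted when the context exits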
| 265 | 0 |
"""simple docstring"""
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
    "A": "N",
    "N": "A",
    "B": "O",
    "O": "B",
    "C": "P",
    "P": "C",
    "D": "Q",
    "Q": "D",
    "E": "R",
    "R": "E",
    "F": "S",
    "S": "F",
    "G": "T",
    "T": "G",
    "H": "U",
    "U": "H",
    "I": "V",
    "V": "I",
    "J": "W",
    "W": "J",
    "K": "X",
    "X": "K",
    "L": "Y",
    "Y": "L",
    "M": "Z",
    "Z": "M",
}

# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"


def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict


def _plugboard(pbstring: str) -> dict[str, str]:
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb


def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #         'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)


if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
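
# A small round-trip sketch (added for illustration): the reflector makes every
# per-letter mapping an involution, so enciphering twice with identical settings
# returns the uppercased plaintext.
def _check_enigma_roundtrip() -> None:
    msg = "HELLO WORLD"
    pos, sel, plug = (1, 1, 1), (rotor2, rotor4, rotor8), "pictures"
    assert enigma(enigma(msg, pos, sel, plug), pos, sel, plug) == msg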
| 54 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
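
# A hedged usage sketch (the image path and labels are illustrative): this
# class is what `pipeline(task="zero-shot-image-classification")` instantiates.
def _example_zero_shot_image_classification():
    from transformers import pipeline

    classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    return classifier(
        "path/to/image.png",
        candidate_labels=["cat", "dog", "car"],
        hypothesis_template="This is a photo of {}.",
    )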
| 265 | 0 |
'''simple docstring'''
def binary_recursive(decimal: int) -> str:
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
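
# A few example calls against the functions above (added for illustration):
def _demo_decimal_to_binary() -> None:
    assert binary_recursive(10) == "1010"
    assert main("-10") == "-0b1010"
    assert main("0") == "0b0"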
| 55 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
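
# A minimal illustration (added, not part of the original tests) of the replay
# pattern used above: each operation is a (callable, *args) tuple applied to a
# mapping object.
def _replay_pattern_demo() -> None:
    target = {}
    for fun, *args in [_set("k", 1), _get("k"), _del("k")]:
        fun(target, *args)
    assert target == {}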
| 265 | 0 |
'''simple docstring'''
def nand_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    """simple docstring"""
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
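
# NAND is functionally complete; a short sketch (added for illustration)
# deriving NOT, AND and OR from the gate above:
def not_gate(input_1: int) -> int:
    return nand_gate(input_1, input_1)


def and_gate(input_1: int, input_2: int) -> int:
    return not_gate(nand_gate(input_1, input_2))


def or_gate(input_1: int, input_2: int) -> int:
    return nand_gate(not_gate(input_1), not_gate(input_2))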
| 56 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def _lowercase( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : str = MBartTokenizer(A , keep_accents=A )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase( self ) -> int:
UpperCAmelCase : Optional[Any] = MBartTokenizer(A , keep_accents=A )
UpperCAmelCase : Tuple = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(
A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def _lowercase( self ) -> Union[str, Any]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase : Tuple = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : str = self.tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase : Dict = tokenizer_r.save_pretrained(A )
UpperCAmelCase : int = tokenizer_p.save_pretrained(A )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
UpperCAmelCase : int = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
UpperCAmelCase : Optional[int] = tokenizer_r.from_pretrained(A )
UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase : Any = tokenizer_r.save_pretrained(A , legacy_format=A )
UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(A )
# Checks it save with the same files
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
UpperCAmelCase : List[str] = tokenizer_r.from_pretrained(A )
UpperCAmelCase : Any = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase : Optional[Any] = tempfile.mkdtemp()
UpperCAmelCase : Optional[Any] = tokenizer_r.save_pretrained(A , legacy_format=A )
UpperCAmelCase : List[str] = tokenizer_p.save_pretrained(A )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(A )
UpperCAmelCase : str = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
def _lowercase( self ) -> Union[str, Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A )
def _lowercase( self ) -> List[str]:
self.assertIn(A , self.tokenizer.all_special_ids )
UpperCAmelCase : str = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
UpperCAmelCase : int = self.tokenizer.decode(A , skip_special_tokens=A )
UpperCAmelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A )
self.assertEqual(A , A )
self.assertNotIn(self.tokenizer.eos_token , A )
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : List[str] = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , A )
UpperCAmelCase : int = 10
UpperCAmelCase : List[Any] = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , A )
self.assertEqual(len(A ) , A )
def _lowercase( self ) -> Tuple:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250026, 250001] )
def _lowercase( self ) -> Dict:
UpperCAmelCase : Any = tempfile.mkdtemp()
UpperCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A )
UpperCAmelCase : Tuple = MBartTokenizer.from_pretrained(A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A )
@require_torch
def _lowercase( self ) -> List[str]:
UpperCAmelCase : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors="""pt""" )
UpperCAmelCase : Union[str, Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
UpperCAmelCase : Optional[int] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(A , A )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
UpperCAmelCase : str = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , A )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Optional[Any] = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors="""pt""" )
UpperCAmelCase : Dict = self.tokenizer(
text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors="""pt""" )
UpperCAmelCase : Dict = targets["""input_ids"""]
UpperCAmelCase : Union[str, Any] = shift_tokens_right(A , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : List[Any] = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(A ) , {
# A, test, EOS, en_XX
"""input_ids""": [[62, 3034, 2, 250004]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250001,
} , )
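
# A hedged end-to-end sketch of what these tests exercise (the checkpoint is
# the real facebook/mbart-large-en-ro; the model class and input sentence are
# illustrative additions, not part of this test file):
def _translate_en_to_ro(text: str) -> str:
    from transformers import MBartForConditionalGeneration

    tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
    model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")
    batch = tokenizer(text, return_tensors="pt")
    generated = model.generate(**batch, decoder_start_token_id=tokenizer.lang_code_to_id["ro_RO"])
    return tokenizer.batch_decode(generated, skip_special_tokens=True)[0]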
| 265 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        self.fairseq_offset = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
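
# A hedged usage sketch (the checkpoint is the real moussaKam/barthez; the
# sentence is illustrative):
def _demo_barthez_tokenizer():
    tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
    encoded = tokenizer("Le camembert est délicieux !")
    return tokenizer.convert_ids_to_tokens(encoded["input_ids"])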
| 57 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )

        self.assertIsNotNone(config)


@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 265 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """simple docstring"""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box) -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
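
# A hedged usage sketch (the checkpoint is the real google/owlvit-base-patch32;
# the image path and queries are illustrative):
def _example_zero_shot_object_detection():
    from transformers import pipeline

    detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
    return detector("path/to/street.jpg", candidate_labels=["car", "bicycle", "person"], threshold=0.1)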
| 58 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
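
# NOTE (illustrative): the collectives above only do real work when launched on
# multiple processes; a typical invocation is
#   accelerate launch --num_processes 2 path/to/this_script.py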
| 265 | 0 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__lowerCamelCase = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class UpperCAmelCase :
A__ : Dict = None
A__ : Optional[int] = ()
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Any:
'''simple docstring'''
snake_case , snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
snake_case : str = 2
snake_case : int = inputs["input_ids"].shape[-1] // 2
snake_case : Union[str, Any] = inputs["input_ids"][:max_batch_size, :sequence_length]
snake_case : Tuple = jnp.ones_like(snake_case__ )
snake_case : str = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
snake_case : Any = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
snake_case : Union[str, Any] = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Dict:
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : Tuple = self._get_input_ids_and_config()
snake_case : Union[str, Any] = False
snake_case : Union[str, Any] = max_length
snake_case : List[Any] = 0
for model_class in self.all_generative_model_classes:
snake_case : List[Any] = model_class(snake_case__ )
snake_case : Optional[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
snake_case : List[str] = getattr(snake_case__ , snake_case__ )
snake_case : Optional[int] = pt_model_class(snake_case__ ).eval()
snake_case : Tuple = load_flax_weights_in_pytorch_model(snake_case__ , flax_model.params )
snake_case : str = flax_model.generate(snake_case__ ).sequences
snake_case : str = pt_model.generate(torch.tensor(snake_case__ , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
snake_case : Tuple = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE (self : str ) -> Tuple:
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : str = self._get_input_ids_and_config()
snake_case : Union[str, Any] = False
snake_case : List[str] = max_length
for model_class in self.all_generative_model_classes:
snake_case : int = model_class(snake_case__ )
snake_case : Dict = model.generate(snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
snake_case : str = jit(model.generate )
snake_case : Optional[int] = jit_generate(snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Any:
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : List[Any] = self._get_input_ids_and_config()
snake_case : Optional[Any] = True
snake_case : int = max_length
for model_class in self.all_generative_model_classes:
snake_case : List[Any] = model_class(snake_case__ )
snake_case : List[str] = model.generate(snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
snake_case : Optional[int] = jit(model.generate )
snake_case : int = jit_generate(snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> str:
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : int = self._get_input_ids_and_config()
snake_case : List[str] = False
snake_case : Optional[Any] = max_length
snake_case : List[Any] = 2
for model_class in self.all_generative_model_classes:
snake_case : int = model_class(snake_case__ )
snake_case : Any = model.generate(snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
snake_case : int = jit(model.generate )
snake_case : Dict = jit_generate(snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : List[Any] = self._get_input_ids_and_config()
snake_case : str = False
snake_case : Optional[int] = max_length
snake_case : Union[str, Any] = 2
snake_case : Optional[int] = 2
for model_class in self.all_generative_model_classes:
snake_case : str = model_class(snake_case__ )
snake_case : Dict = model.generate(snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> str:
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : Any = self._get_input_ids_and_config()
snake_case : int = True
snake_case : Dict = max_length
snake_case : Optional[int] = 0.8
snake_case : Dict = 10
snake_case : Optional[int] = 0.3
snake_case : Tuple = 1
snake_case : Optional[Any] = 8
snake_case : List[Any] = 9
for model_class in self.all_generative_model_classes:
snake_case : Optional[int] = model_class(snake_case__ )
snake_case : Union[str, Any] = model.generate(snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
snake_case : Optional[int] = jit(model.generate )
snake_case : Any = jit_generate(snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : List[str] = self._get_input_ids_and_config()
snake_case : int = max_length
snake_case : int = 1
snake_case : Optional[int] = 8
snake_case : Any = 9
for model_class in self.all_generative_model_classes:
snake_case : Optional[int] = model_class(snake_case__ )
snake_case : int = model.generate(snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
snake_case : List[Any] = jit(model.generate )
snake_case : Union[str, Any] = jit_generate(snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> str:
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : List[Any] = self._get_input_ids_and_config()
snake_case : List[Any] = max_length
snake_case : Dict = 2
snake_case : Any = 1
snake_case : str = 8
snake_case : Union[str, Any] = 9
for model_class in self.all_generative_model_classes:
snake_case : Union[str, Any] = model_class(snake_case__ )
snake_case : Union[str, Any] = model.generate(snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
snake_case : Optional[int] = jit(model.generate )
snake_case : Optional[Any] = jit_generate(snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> List[Any]:
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : Tuple = self._get_input_ids_and_config()
# pad attention mask on the left
snake_case : List[Any] = attention_mask.at[(0, 0)].set(0 )
snake_case : Tuple = False
snake_case : Tuple = max_length
for model_class in self.all_generative_model_classes:
snake_case : Optional[int] = model_class(snake_case__ )
snake_case : str = model.generate(snake_case__ , attention_mask=snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
snake_case : List[str] = jit(model.generate )
snake_case : Dict = jit_generate(snake_case__ , attention_mask=snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE (self : str ) -> Tuple:
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : Any = self._get_input_ids_and_config()
# pad attention mask on the left
snake_case : List[str] = attention_mask.at[(0, 0)].set(0 )
snake_case : Optional[int] = True
snake_case : Any = max_length
for model_class in self.all_generative_model_classes:
snake_case : str = model_class(snake_case__ )
snake_case : Optional[Any] = model.generate(snake_case__ , attention_mask=snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
snake_case : Optional[Any] = jit(model.generate )
snake_case : List[str] = jit_generate(snake_case__ , attention_mask=snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _SCREAMING_SNAKE_CASE (self : str ) -> int:
'''simple docstring'''
snake_case , snake_case , snake_case , snake_case : Optional[int] = self._get_input_ids_and_config()
# pad attention mask on the left
snake_case : Optional[int] = attention_mask.at[(0, 0)].set(0 )
snake_case : Optional[Any] = 2
snake_case : Optional[Any] = max_length
for model_class in self.all_generative_model_classes:
snake_case : Union[str, Any] = model_class(snake_case__ )
snake_case : Optional[Any] = model.generate(snake_case__ , attention_mask=snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
snake_case : List[Any] = jit(model.generate )
snake_case : str = jit_generate(snake_case__ , attention_mask=snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : int ) -> Any:
'''simple docstring'''
snake_case : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
snake_case : List[str] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
snake_case : Any = "Hello world"
snake_case : str = tokenizer(snake_case__ , return_tensors="np" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError , "do_samples" ):
model.generate(snake_case__ , do_samples=snake_case__ )
# arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError , "foo" ):
snake_case : Optional[Any] = {"foo": "bar"}
model.generate(snake_case__ , **snake_case__ )
| 59 |
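For reference, a minimal self-contained sketch of the eager-vs-`jit` equivalence pattern the generation tests above rely on (assumes only `jax` is installed; the greedy step is an illustrative stand-in for `model.generate`):

import jax
import jax.numpy as jnp

def greedy_step(logits):
    # an illustrative stand-in for a generation step: argmax token per row
    return jnp.argmax(logits, axis=-1)

logits = jnp.arange(12.0).reshape(2, 6)
eager_out = greedy_step(logits)
jit_out = jax.jit(greedy_step)(logits)
assert eager_out.tolist() == jit_out.tolist()  # mirrors assertListEqual above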
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
a : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , A , A , A , A , A , A , A , ) -> Optional[Any]:
super().__init__()
self.register_modules(
vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , )
def _lowercase( self , A = "auto" ) -> List[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def _lowercase( self ) -> Dict:
self.enable_attention_slicing(A )
@torch.no_grad()
def __call__( self , A , A = 512 , A = 512 , A = 50 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , A = None , **A , ) -> List[Any]:
if isinstance(A , A ):
UpperCAmelCase : List[str] = 1
elif isinstance(A , A ):
UpperCAmelCase : Dict = len(A )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(A )}.''' )
# get prompt text embeddings
UpperCAmelCase : List[str] = self.tokenizer(
A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
UpperCAmelCase : List[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCAmelCase : int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCAmelCase : Tuple = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
UpperCAmelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = text_embeddings.shape
UpperCAmelCase : List[str] = text_embeddings.repeat(1 , A , 1 )
UpperCAmelCase : List[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCAmelCase : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase : List[str]
if negative_prompt is None:
UpperCAmelCase : Any = [""""""]
elif type(A ) is not type(A ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !='''
f''' {type(A )}.''' )
elif isinstance(A , A ):
UpperCAmelCase : Optional[int] = [negative_prompt]
elif batch_size != len(A ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
UpperCAmelCase : Any = negative_prompt
UpperCAmelCase : Dict = text_input_ids.shape[-1]
UpperCAmelCase : List[Any] = self.tokenizer(
A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , )
UpperCAmelCase : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase : int = uncond_embeddings.shape[1]
UpperCAmelCase : List[Any] = uncond_embeddings.repeat(A , A , 1 )
UpperCAmelCase : List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCAmelCase : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCAmelCase : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
UpperCAmelCase : str = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCAmelCase : Dict = torch.randn(
A , generator=A , device="""cpu""" , dtype=A ).to(self.device )
UpperCAmelCase : int = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to(
self.device )
else:
UpperCAmelCase : int = torch.randn(
A , generator=A , device=self.device , dtype=A )
UpperCAmelCase : int = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
UpperCAmelCase : Optional[Any] = latents_reference.to(self.device )
UpperCAmelCase : Tuple = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
UpperCAmelCase : int = (latents_shape[3] - latents_shape_reference[3]) // 2
UpperCAmelCase : List[str] = (latents_shape[2] - latents_shape_reference[2]) // 2
UpperCAmelCase : Union[str, Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
UpperCAmelCase : Union[str, Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
UpperCAmelCase : Optional[int] = 0 if dx < 0 else dx
UpperCAmelCase : List[str] = 0 if dy < 0 else dy
UpperCAmelCase : Union[str, Any] = max(-dx , 0 )
UpperCAmelCase : List[Any] = max(-dy , 0 )
UpperCAmelCase : str = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(A )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more efficient to move all timesteps to the correct device beforehand
UpperCAmelCase : Union[str, Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase : Optional[int] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase : int = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase : Optional[Any] = {}
if accepts_eta:
UpperCAmelCase : List[str] = eta
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase : str = self.scheduler.scale_model_input(A , A )
# predict the noise residual
UpperCAmelCase : Any = self.unet(A , A , encoder_hidden_states=A ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCAmelCase , UpperCAmelCase : Any = noise_pred.chunk(2 )
UpperCAmelCase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase : Dict = self.scheduler.step(A , A , A , **A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A , A )
UpperCAmelCase : Union[str, Any] = 1 / 0.1_8_2_1_5 * latents
UpperCAmelCase : Tuple = self.vae.decode(A ).sample
UpperCAmelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
UpperCAmelCase : int = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to(
self.device )
UpperCAmelCase , UpperCAmelCase : int = self.safety_checker(
images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
UpperCAmelCase : Any = None
if output_type == "pil":
UpperCAmelCase : int = self.numpy_to_pil(A )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
| 265 | 0 |
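As a minimal sketch of the classifier-free guidance step used by the pipeline above (plain torch, with illustrative shapes rather than the real UNet's):

import torch

guidance_scale = 7.5
noise_pred = torch.randn(2, 4, 8, 8)  # [uncond, text] stacked along dim 0
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == (1, 4, 8, 8)
# guidance_scale == 1.0 recovers the text-conditioned prediction exactly:
assert torch.allclose(noise_pred_uncond + 1.0 * (noise_pred_text - noise_pred_uncond), noise_pred_text)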
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case_( a__ , unittest.TestCase ):
__UpperCamelCase = GPTSanJapaneseTokenizer
__UpperCamelCase = False
__UpperCamelCase = {'''do_clean_text''': False, '''add_prefix_space''': False}
def lowerCamelCase__ ( self : Optional[int] ):
super().setUp()
# fmt: off
lowerCAmelCase : List[str] = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>''']
# fmt: on
lowerCAmelCase : int = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}} # 😀
lowerCAmelCase : List[str] = {'''unk_token''': '''<unk>'''}
lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.emoji_file , '''w''' ) as emoji_writer:
emoji_writer.write(json.dumps(UpperCamelCase_ ) )
def lowerCamelCase__ ( self : Union[str, Any] , **UpperCamelCase_ : Optional[Any] ):
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : Optional[int] ):
lowerCAmelCase : int = '''こんにちは、世界。 \nこんばんは、㔺界。😀'''
lowerCAmelCase : List[Any] = '''こんにちは、世界。 \nこんばんは、世界。😀'''
return input_text, output_text
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : Dict ):
lowerCAmelCase, lowerCAmelCase : List[Any] = self.get_input_output_texts(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
lowerCAmelCase : List[str] = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
return text, ids
def lowerCamelCase__ ( self : Any ):
pass # TODO add if relevant
def lowerCamelCase__ ( self : Union[str, Any] ):
pass # TODO add if relevant
def lowerCamelCase__ ( self : Union[str, Any] ):
pass # TODO add if relevant
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : Any = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase : int = '''こんにちは、世界。 こんばんは、㔺界。'''
lowerCAmelCase : Union[str, Any] = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
lowerCAmelCase : Dict = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# Testing conversion to ids without special tokens
lowerCAmelCase : int = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowerCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# Testing conversion to ids with special tokens
lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token]
lowerCAmelCase : Tuple = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9]
lowerCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : List[Any] = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase : str = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
lowerCAmelCase : List[Any] = '''こんにちは、、、、世界。こんばんは、、、、世界。'''
lowerCAmelCase : Dict = tokenizer.encode(UpperCamelCase_ )
lowerCAmelCase : Dict = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : str ):
lowerCAmelCase : str = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
lowerCAmelCase : Optional[int] = '''こんにちは、世界。'''
lowerCAmelCase : Dict = '''こんばんは、㔺界。😀'''
lowerCAmelCase : int = '''こんにちは、世界。こんばんは、世界。😀'''
lowerCAmelCase : Any = tokenizer.encode(prefix_text + input_text )
lowerCAmelCase : List[str] = tokenizer.encode('''''' , prefix_text=prefix_text + input_text )
lowerCAmelCase : Any = tokenizer.encode(UpperCamelCase_ , prefix_text=UpperCamelCase_ )
lowerCAmelCase : Tuple = tokenizer.decode(UpperCamelCase_ )
lowerCAmelCase : List[str] = tokenizer.decode(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
lowerCAmelCase : Optional[int] = '''こんにちは、世界。'''
lowerCAmelCase : Union[str, Any] = '''こんばんは、㔺界。😀'''
lowerCAmelCase : Dict = len(tokenizer.encode(UpperCamelCase_ ) ) - 2
lowerCAmelCase : List[Any] = len(tokenizer.encode(UpperCamelCase_ ) ) - 2
lowerCAmelCase : List[Any] = [1] + [0] * (len_prefix + len_text + 1)
lowerCAmelCase : Tuple = [1] * (len_prefix + len_text + 1) + [0]
lowerCAmelCase : int = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCAmelCase : Dict = tokenizer(prefix_text + input_text ).token_type_ids
lowerCAmelCase : Optional[int] = tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids
lowerCAmelCase : Dict = tokenizer(UpperCamelCase_ , prefix_text=UpperCamelCase_ ).token_type_ids
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
lowerCAmelCase : List[Any] = tokenizer.encode('''あンいワ''' )
lowerCAmelCase : List[Any] = tokenizer.encode('''''' , prefix_text='''あンいワ''' )
lowerCAmelCase : List[Any] = tokenizer.encode('''いワ''' , prefix_text='''あン''' )
self.assertEqual(tokenizer.decode(UpperCamelCase_ ) , tokenizer.decode(UpperCamelCase_ ) )
self.assertEqual(tokenizer.decode(UpperCamelCase_ ) , tokenizer.decode(UpperCamelCase_ ) )
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
lowerCAmelCase : List[Any] = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
lowerCAmelCase : List[str] = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = tokenizer.batch_encode_plus(UpperCamelCase_ , padding=UpperCamelCase_ )
# fmt: off
lowerCAmelCase : Optional[Any] = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]]
lowerCAmelCase : Tuple = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowerCAmelCase : int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , UpperCamelCase_ )
self.assertListEqual(x_token.token_type_ids , UpperCamelCase_ )
self.assertListEqual(x_token.attention_mask , UpperCamelCase_ )
self.assertListEqual(x_token_a.input_ids , UpperCamelCase_ )
self.assertListEqual(x_token_a.token_type_ids , UpperCamelCase_ )
self.assertListEqual(x_token_a.attention_mask , UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] ):
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def lowerCamelCase__ ( self : int ):
# tokenizer has no padding token
pass
| 60 |
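A small sketch of how the three `token_type_ids` patterns asserted in the prefix tests above are derived (illustrative lengths; the real ones come from the tokenizer):

len_prefix, len_text = 3, 4
no_prefix = [1] + [0] * (len_prefix + len_text + 1)    # everything treated as text
all_prefix = [1] * (len_prefix + len_text + 1) + [0]   # everything treated as prefix
split = [1] + [1] * len_prefix + [0] * (len_text + 1)  # explicit prefix/text split
assert len(no_prefix) == len(all_prefix) == len(split) == len_prefix + len_text + 2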
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ :
def __init__( self , A , A=12 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=32 , A=2 , A=4 , A=37 , A=0.1 , A=0.1 , A=512 , A=0.0_2 , A=0 , A=None , ) -> Any:
UpperCAmelCase : Optional[Any] = parent
UpperCAmelCase : str = batch_size
UpperCAmelCase : Union[str, Any] = seq_length
UpperCAmelCase : Optional[Any] = is_training
UpperCAmelCase : int = use_input_mask
UpperCAmelCase : List[Any] = use_labels
UpperCAmelCase : Dict = vocab_size
UpperCAmelCase : str = hidden_size
UpperCAmelCase : List[Any] = projection_dim
UpperCAmelCase : Tuple = num_hidden_layers
UpperCAmelCase : Dict = num_attention_heads
UpperCAmelCase : Optional[Any] = intermediate_size
UpperCAmelCase : Any = dropout
UpperCAmelCase : List[Any] = attention_dropout
UpperCAmelCase : Optional[Any] = max_position_embeddings
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Optional[Any] = scope
UpperCAmelCase : Union[str, Any] = bos_token_id
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Union[str, Any] = None
if self.use_input_mask:
UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
UpperCAmelCase : Tuple = input_mask.numpy()
UpperCAmelCase , UpperCAmelCase : int = input_mask.shape
UpperCAmelCase : Optional[int] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(A ):
UpperCAmelCase : Tuple = 1
UpperCAmelCase : Optional[Any] = 0
UpperCAmelCase : int = self.get_config()
return config, input_ids, tf.convert_to_tensor(A )
def _lowercase( self ) -> int:
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _lowercase( self , A , A , A ) -> Union[str, Any]:
UpperCAmelCase : int = TFBlipTextModel(config=A )
UpperCAmelCase : Union[str, Any] = model(A , attention_mask=A , training=A )
UpperCAmelCase : int = model(A , training=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Dict = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = config_and_inputs
UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = (TFBlipTextModel,) if is_tf_available() else ()
lowercase = False
lowercase = False
lowercase = False
def _lowercase( self ) -> int:
UpperCAmelCase : Union[str, Any] = BlipTextModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 )
def _lowercase( self ) -> Tuple:
self.config_tester.run_common_tests()
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> List[str]:
pass
def _lowercase( self ) -> Optional[int]:
pass
@unittest.skip(reason="""Blip does not use inputs_embeds""" )
def _lowercase( self ) -> Union[str, Any]:
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def _lowercase( self ) -> Optional[int]:
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def _lowercase( self ) -> Dict:
pass
@slow
def _lowercase( self ) -> Dict:
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Any = TFBlipTextModel.from_pretrained(A )
self.assertIsNotNone(A )
def _lowercase( self , A=True ) -> str:
super().test_pt_tf_model_equivalence(allow_missing_keys=A )
| 265 | 0 |
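A numpy sketch of the right-padded attention mask built in `prepare_config_and_inputs` above: keep a random number of leading tokens per row and zero out the rest (hypothetical sizes):

import numpy as np

batch_size, seq_length = 2, 7
rng = np.random.default_rng(0)
input_mask = np.ones((batch_size, seq_length), dtype=np.int32)
start_indices = rng.integers(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(start_indices):
    input_mask[batch_idx, :start_index] = 1
    input_mask[batch_idx, start_index:] = 0
assert (input_mask.sum(axis=1) == start_indices).all()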
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
_a = logging.getLogger(__name__)
_a = tf.data.AUTOTUNE
def __a ( ):
UpperCAmelCase_ : Any = argparse.ArgumentParser(description="Train a masked language model on TPU." )
parser.add_argument(
"--pretrained_model_config", type=__lowerCamelCase, default="roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!", )
parser.add_argument(
"--tokenizer", type=__lowerCamelCase, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.", )
parser.add_argument(
"--per_replica_batch_size", type=__lowerCamelCase, default=8, help="Batch size per TPU core.", )
parser.add_argument(
"--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.", )
parser.add_argument(
"--tpu_name", type=__lowerCamelCase, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local", )
parser.add_argument(
"--tpu_zone", type=__lowerCamelCase, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.", )
parser.add_argument(
"--gcp_project", type=__lowerCamelCase, help="Google cloud project name. Only used for non-Colab TPU nodes." )
parser.add_argument(
"--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.", )
parser.add_argument(
"--train_dataset", type=__lowerCamelCase, help="Path to training dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket.", )
parser.add_argument(
"--shuffle_buffer_size", type=__lowerCamelCase, default=2**18, help="Size of the shuffle buffer (in samples)", )
parser.add_argument(
"--eval_dataset", type=__lowerCamelCase, help="Path to evaluation dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket.", )
parser.add_argument(
"--num_epochs", type=__lowerCamelCase, default=1, help="Number of epochs to train for.", )
parser.add_argument(
"--learning_rate", type=__lowerCamelCase, default=1E-4, help="Learning rate to use for training.", )
parser.add_argument(
"--weight_decay_rate", type=__lowerCamelCase, default=1E-3, help="Weight decay rate to use for training.", )
parser.add_argument(
"--max_length", type=__lowerCamelCase, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py", )
parser.add_argument(
"--mlm_probability", type=__lowerCamelCase, default=0.15, help="Fraction of tokens to mask during training.", )
parser.add_argument("--output_dir", type=__lowerCamelCase, required=__lowerCamelCase, help="Path to save model checkpoints to." )
parser.add_argument("--hub_model_id", type=__lowerCamelCase, help="Model ID to upload to on the Hugging Face Hub." )
UpperCAmelCase_ : Dict = parser.parse_args()
return args
def __a ( __lowerCamelCase ):
try:
if args.tpu_name:
UpperCAmelCase_ : Optional[Any] = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name, zone=args.tpu_zone, project=args.gcp_project )
else:
UpperCAmelCase_ : Dict = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
"--gcp_project. When running on a TPU VM, use --tpu_name local." )
tf.config.experimental_connect_to_cluster(__lowerCamelCase )
tf.tpu.experimental.initialize_tpu_system(__lowerCamelCase )
return tpu
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Optional[int] = 0
for file in file_list:
UpperCAmelCase_ : Union[str, Any] = file.split("/" )[-1]
UpperCAmelCase_ : Union[str, Any] = re.search(r"-\d+-(\d+)\.tfrecord", __lowerCamelCase ).group(1 )
UpperCAmelCase_ : List[Any] = int(__lowerCamelCase )
num_samples += sample_count
return num_samples
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None ):
UpperCAmelCase_ : Optional[int] = count_samples(__lowerCamelCase )
UpperCAmelCase_ : Optional[Any] = tf.data.Dataset.from_tensor_slices(__lowerCamelCase )
if shuffle:
UpperCAmelCase_ : Dict = dataset.shuffle(len(__lowerCamelCase ) )
UpperCAmelCase_ : Any = tf.data.TFRecordDataset(__lowerCamelCase, num_parallel_reads=__lowerCamelCase )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
UpperCAmelCase_ : str = dataset.apply(tf.data.experimental.assert_cardinality(__lowerCamelCase ) )
UpperCAmelCase_ : Any = dataset.map(__lowerCamelCase, num_parallel_calls=__lowerCamelCase )
if shuffle:
assert shuffle_buffer_size is not None
UpperCAmelCase_ : Optional[int] = dataset.shuffle(args.shuffle_buffer_size )
UpperCAmelCase_ : Tuple = dataset.batch(__lowerCamelCase, drop_remainder=__lowerCamelCase )
UpperCAmelCase_ : Tuple = dataset.map(__lowerCamelCase, num_parallel_calls=__lowerCamelCase )
UpperCAmelCase_ : Optional[Any] = dataset.prefetch(__lowerCamelCase )
return dataset
def __a ( __lowerCamelCase ):
if not args.no_tpu:
UpperCAmelCase_ : List[str] = initialize_tpu(__lowerCamelCase )
UpperCAmelCase_ : Union[str, Any] = tf.distribute.TPUStrategy(__lowerCamelCase )
else:
UpperCAmelCase_ : Dict = tf.distribute.OneDeviceStrategy(device="/gpu:0" )
    if args.bfloat16:
tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(args.tokenizer )
UpperCAmelCase_ : Optional[Any] = AutoConfig.from_pretrained(args.pretrained_model_config )
UpperCAmelCase_ : Any = tokenizer.vocab_size
UpperCAmelCase_ : Dict = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord" ) )
if not training_records:
raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" )
UpperCAmelCase_ : Union[str, Any] = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord" ) )
if not eval_records:
raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" )
UpperCAmelCase_ : Union[str, Any] = count_samples(__lowerCamelCase )
UpperCAmelCase_ : Any = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
UpperCAmelCase_ : Any = steps_per_epoch * args.num_epochs
with strategy.scope():
UpperCAmelCase_ : Dict = TFAutoModelForMaskedLM.from_config(__lowerCamelCase )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
UpperCAmelCase_ , UpperCAmelCase_ : int = create_optimizer(
num_train_steps=__lowerCamelCase, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=__lowerCamelCase, metrics=["accuracy"] )
def decode_fn(__lowerCamelCase ):
UpperCAmelCase_ : Dict = {
"input_ids": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ),
"attention_mask": tf.io.FixedLenFeature(dtype=tf.intaa, shape=(args.max_length,) ),
}
return tf.io.parse_single_example(__lowerCamelCase, __lowerCamelCase )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
UpperCAmelCase_ : Optional[int] = DataCollatorForLanguageModeling(
tokenizer=__lowerCamelCase, mlm_probability=args.mlm_probability, mlm=__lowerCamelCase, return_tensors="tf" )
def mask_with_collator(__lowerCamelCase ):
# TF really needs an isin() function
UpperCAmelCase_ : List[Any] = (
~tf.cast(batch["attention_mask"], tf.bool )
| (batch["input_ids"] == tokenizer.cls_token_id)
| (batch["input_ids"] == tokenizer.sep_token_id)
)
UpperCAmelCase_ , UpperCAmelCase_ : Dict = data_collator.tf_mask_tokens(
batch["input_ids"], vocab_size=len(__lowerCamelCase ), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=__lowerCamelCase, )
return batch
UpperCAmelCase_ : List[str] = args.per_replica_batch_size * strategy.num_replicas_in_sync
UpperCAmelCase_ : List[str] = prepare_dataset(
__lowerCamelCase, decode_fn=__lowerCamelCase, mask_fn=__lowerCamelCase, batch_size=__lowerCamelCase, shuffle=__lowerCamelCase, shuffle_buffer_size=args.shuffle_buffer_size, )
UpperCAmelCase_ : int = prepare_dataset(
__lowerCamelCase, decode_fn=__lowerCamelCase, mask_fn=__lowerCamelCase, batch_size=__lowerCamelCase, shuffle=__lowerCamelCase, )
UpperCAmelCase_ : str = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=__lowerCamelCase ) )
model.fit(
__lowerCamelCase, validation_data=__lowerCamelCase, epochs=args.num_epochs, callbacks=__lowerCamelCase, )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
_a = parse_args()
main(args)
| 61 |
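The shard-counting logic above assumes filenames of the form `<prefix>-<shard>-<num_samples>.tfrecord`; a minimal sketch with hypothetical shard names:

import re

files = ["train-0-1024.tfrecord", "train-1-512.tfrecord"]  # hypothetical shards
total = 0
for file in files:
    filename = file.split("/")[-1]
    total += int(re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1))
assert total == 1536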
'''simple docstring'''
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
a : str = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
a : int = """main"""
# Default branch name
a : Any = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"""
# One particular commit (not the top of `main`)
a : str = """aaaaaaa"""
# This commit does not exist, so we should 404.
a : int = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"""
# Sha-1 of config.json on the top of `main`, for checking purposes
a : Any = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"""
@contextlib.contextmanager
def __lowerCamelCase ( ) -> List[str]:
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def __lowerCamelCase ( ) -> Optional[int]:
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> List[Any]:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class UpperCamelCase_ ( unittest.TestCase ):
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def _lowercase( self , A ) -> Tuple:
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def _lowercase( self , A ) -> Dict:
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def _lowercase( self , A ) -> Union[str, Any]:
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
def _lowercase( self ) -> Optional[int]:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class UpperCamelCase_ ( __magic_name__ ):
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_tf
def _lowercase( self ) -> int:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class UpperCamelCase_ ( __magic_name__ ):
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_flax
def _lowercase( self ) -> Any:
# Flax models don't have labels
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
class UpperCamelCase_ ( __magic_name__ ):
pass
self.assertEqual(find_labels(A ) , [] )
| 265 | 0 |
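A minimal sketch of what a `ContextManagers`-style helper does, built on `contextlib.ExitStack`: enter the managers in order and exit them in reverse, matching the nested fr/en output the test above expects:

from contextlib import ExitStack, contextmanager

@contextmanager
def tag(name):
    print(f"enter {name}")
    yield
    print(f"exit {name}")

with ExitStack() as stack:
    for cm in [tag("fr"), tag("en")]:
        stack.enter_context(cm)
    print("Transformers are awesome!")
# prints: enter fr / enter en / Transformers are awesome! / exit en / exit fr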
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
if index == number_of_items:
return 0
__UpperCamelCase =0
__UpperCamelCase =0
__UpperCamelCase =knapsack(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index + 1 )
if weights[index] <= max_weight:
__UpperCamelCase =values[index] + knapsack(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , max_weight - weights[index] , index + 1 )
return max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 62 |
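A readable restatement of the recursion above with a worked example: for weights [10, 20, 30], values [60, 100, 120] and capacity 50, the best choice is items 2 and 3 (20 + 30 = 50), worth 220.

def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    if index == number_of_items:
        return 0
    ans_without = knapsack(weights, values, number_of_items, max_weight, index + 1)
    ans_with = 0
    if weights[index] <= max_weight:
        ans_with = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans_with, ans_without)

print(knapsack([10, 20, 30], [60, 100, 120], 3, 50, 0))  # 220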
'''simple docstring'''
from itertools import count
def __lowerCamelCase ( _lowercase = 5_0 ) -> int:
UpperCAmelCase : Any = [1] * min_block_length
for n in count(_lowercase ):
fill_count_functions.append(1 )
for block_length in range(_lowercase , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_0_0_0_0_0_0:
break
return n
if __name__ == "__main__":
print(F'''{solution() = }''')
| 265 | 0 |
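The same recurrence with readable names, plus a check against the values quoted in the Project Euler 115 statement (F(3, 29) = 673135 and F(3, 30) = 1089155, so the count first exceeds one million at n = 30):

from itertools import count

def solution(min_block_length: int = 50) -> int:
    fill = [1] * min_block_length
    for n in count(min_block_length):
        fill.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill[n] += fill[n - block_start - block_length - 1]
            fill[n] += 1
        if fill[n] > 1_000_000:
            break
    return n

print(solution(3))  # 30, matching the values in the problem statement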
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =ShapEPipeline
__a =['prompt']
__a =['prompt']
__a =[
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
__a =False
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return 32
@property
def UpperCamelCase__ ( self : Union[str, Any] ):
return 32
@property
def UpperCamelCase__ ( self : List[Any] ):
return self.time_input_dim * 4
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return 8
@property
def UpperCamelCase__ ( self : Any ):
_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def UpperCamelCase__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(__a )
@property
def UpperCamelCase__ ( self : int ):
torch.manual_seed(0 )
_a = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
_a = PriorTransformer(**__a )
return model
@property
def UpperCamelCase__ ( self : Any ):
torch.manual_seed(0 )
_a = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
_a = ShapERenderer(**__a )
return model
def UpperCamelCase__ ( self : Tuple ):
_a = self.dummy_prior
_a = self.dummy_text_encoder
_a = self.dummy_tokenizer
_a = self.dummy_renderer
_a = HeunDiscreteScheduler(
beta_schedule="exp" , num_train_timesteps=10_24 , prediction_type="sample" , use_karras_sigmas=__a , clip_sample=__a , clip_sample_range=1.0 , )
_a = {
"prior": prior,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def UpperCamelCase__ ( self : int , __a : List[str] , __a : str=0 ):
if str(__a ).startswith("mps" ):
_a = torch.manual_seed(__a )
else:
_a = torch.Generator(device=__a ).manual_seed(__a )
_a = {
"prompt": "horse",
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def UpperCamelCase__ ( self : Dict ):
_a = "cpu"
_a = self.get_dummy_components()
_a = self.pipeline_class(**__a )
_a = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
_a = pipe(**self.get_dummy_inputs(__a ) )
_a = output.images[0]
_a = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_a = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase__ ( self : List[str] ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase__ ( self : str ):
_a = torch_device == "cpu"
_a = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__a , relax_max_difference=__a , )
def UpperCamelCase__ ( self : Tuple ):
_a = self.get_dummy_components()
_a = self.pipeline_class(**__a )
_a = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
_a = 1
_a = 2
_a = self.get_dummy_inputs(__a )
for key in inputs.keys():
if key in self.batch_params:
_a = batch_size * [inputs[key]]
_a = pipe(**__a , num_images_per_prompt=__a )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self : int ):
_a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_np_out.npy" )
_a = ShapEPipeline.from_pretrained("openai/shap-e" )
_a = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
_a = torch.Generator(device=__a ).manual_seed(0 )
_a = pipe(
"a shark" , generator=__a , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__a , __a )
| 63 |
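A minimal sketch of the seeded-generator pattern `get_dummy_inputs` uses above; pinning a `torch.Generator` to a seed makes the sampled noise, and hence the pipeline output, reproducible:

import torch

g1 = torch.Generator().manual_seed(0)
g2 = torch.Generator().manual_seed(0)
a = torch.randn(2, 3, generator=g1)
b = torch.randn(2, 3, generator=g2)
assert torch.equal(a, b)  # same seed, same noise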
'''simple docstring'''
from __future__ import annotations
import math
class UpperCamelCase_ :
def __init__( self , A ) -> None:
UpperCAmelCase : Optional[int] = size
        # allocate roughly 4 * size nodes, a safe upper bound for a segment tree over `size` leaves
UpperCAmelCase : Optional[int] = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
UpperCAmelCase : Any = [0 for i in range(0 , 4 * size )]
UpperCAmelCase : Tuple = [0 for i in range(0 , 4 * size )] # flag for lazy update
def _lowercase( self , A ) -> int:
return idx * 2
def _lowercase( self , A ) -> int:
return idx * 2 + 1
def _lowercase( self , A , A , A , A ) -> None:
if left_element == right_element:
UpperCAmelCase : str = a[left_element - 1]
else:
UpperCAmelCase : Tuple = (left_element + right_element) // 2
self.build(self.left(A ) , A , A , A )
self.build(self.right(A ) , mid + 1 , A , A )
UpperCAmelCase : str = max(
self.segment_tree[self.left(A )] , self.segment_tree[self.right(A )] )
def _lowercase( self , A , A , A , A , A , A ) -> bool:
if self.flag[idx] is True:
UpperCAmelCase : Optional[Any] = self.lazy[idx]
UpperCAmelCase : int = False
if left_element != right_element:
UpperCAmelCase : List[str] = self.lazy[idx]
UpperCAmelCase : Optional[Any] = self.lazy[idx]
UpperCAmelCase : List[str] = True
UpperCAmelCase : int = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
UpperCAmelCase : Optional[Any] = val
if left_element != right_element:
UpperCAmelCase : Tuple = val
UpperCAmelCase : int = val
UpperCAmelCase : Any = True
UpperCAmelCase : str = True
return True
UpperCAmelCase : str = (left_element + right_element) // 2
self.update(self.left(A ) , A , A , A , A , A )
self.update(self.right(A ) , mid + 1 , A , A , A , A )
UpperCAmelCase : List[str] = max(
self.segment_tree[self.left(A )] , self.segment_tree[self.right(A )] )
return True
def _lowercase( self , A , A , A , A , A ) -> int | float:
if self.flag[idx] is True:
UpperCAmelCase : Any = self.lazy[idx]
UpperCAmelCase : Any = False
if left_element != right_element:
UpperCAmelCase : Optional[Any] = self.lazy[idx]
UpperCAmelCase : Tuple = self.lazy[idx]
UpperCAmelCase : List[str] = True
UpperCAmelCase : Tuple = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
UpperCAmelCase : Dict = (left_element + right_element) // 2
UpperCAmelCase : List[Any] = self.query(self.left(A ) , A , A , A , A )
UpperCAmelCase : str = self.query(self.right(A ) , mid + 1 , A , A , A )
return max(A , A )
def __str__( self ) -> str:
return str([self.query(1 , 1 , self.size , A , A ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
a : Optional[int] = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8]
a : Optional[Any] = 1_5
a : Union[str, Any] = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 1_1))
print(segt.query(1, 1, size, 7, 1_2))
segt.update(1, 1, size, 1, 3, 1_1_1)
print(segt.query(1, 1, size, 1, 1_5))
segt.update(1, 1, size, 7, 8, 2_3_5)
print(segt)
| 265 | 0 |
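A brute-force check of the demo queries above on the same array; the 1-indexed range [l, r] maps to the Python slice A[l-1:r]:

A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
assert max(A[3:6]) == 7     # query(4, 6)
assert max(A[6:11]) == 14   # query(7, 11)
assert max(A[6:12]) == 15   # query(7, 12)
A[0:3] = [111] * 3          # update(1, 3, 111), done lazily by the tree
assert max(A) == 111        # query(1, 15)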
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. Compare that with taking a full-size model and reducing its layers and emb
# dimensions to the minimum while keeping the full vocab + merges files, which leads to ~3MB in
# total for all files. The latter approach is done by `fsmt-make-tiny-model.py`.
#
# It will then be used as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
A_ = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
A_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
A_ = dict(zip(vocab, range(len(vocab))))
A_ = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
A_ = Path(tmpdirname)
A_ = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
A_ = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
A_ = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
A_ = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
A_ = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=10_00,
tgt_vocab_size=10_00,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
A_ = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
A_ = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
A_ = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 64 |
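A sketch of consuming the uploaded artifact (assumes network access and that the `stas/tiny-wmt19-en-ru` repo is available on the Hub; output quality is irrelevant for a tiny test model):

from transformers import FSMTForConditionalGeneration, FSMTTokenizer

tok = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")
batch = tok(["Making tiny model"], return_tensors="pt")
out = model.generate(**batch, max_new_tokens=8)
print(tok.batch_decode(out, skip_special_tokens=True))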
'''simple docstring'''
from PIL import Image
def __lowerCamelCase ( _lowercase , _lowercase ) -> Image:
def brightness(_lowercase ) -> float:
return 1_2_8 + level + (c - 1_2_8)
if not -255.0 <= level <= 255.0:
raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
return img.point(_lowercase )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
a : Optional[Any] = change_brightness(img, 1_0_0)
brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 265 | 0 |
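A worked check of the point-wise formula above: 128 + level + (c - 128) simplifies to c + level, so `level` shifts every channel uniformly; as far as I know, `Image.point` builds a lookup table from the function and clips results to 0..255 for 8-bit modes.

level = 100
for c in (0, 50, 200):
    assert 128 + level + (c - 128) == c + level
# e.g. c = 50 -> 150, while c = 200 -> 300, which the 8-bit output clips to 255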
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
UpperCamelCase__ = ['bert-base-uncased', 'bert-base-cased']
UpperCamelCase__ = 'hf-internal-testing/tiny-bert-tf-only'
if is_tf_available():
class A ( tf.keras.Model ):
def __init__(self : Any , __UpperCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
UpperCAmelCase__ = tokenizer
UpperCAmelCase__ = AutoConfig.from_pretrained(__UpperCAmelCase )
UpperCAmelCase__ = TFAutoModel.from_config(__UpperCAmelCase )
def lowercase_ (self : int , __UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer(__UpperCAmelCase )
UpperCAmelCase__ = self.bert(**__UpperCAmelCase )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class A ( unittest.TestCase ):
def lowercase_ (self : List[str] ) -> List[str]:
"""simple docstring"""
super().setUp()
UpperCAmelCase__ = [
BertTokenizer.from_pretrained(__UpperCAmelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
UpperCAmelCase__ = [TFBertTokenizer.from_pretrained(__UpperCAmelCase ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(__UpperCAmelCase , use_fast_bert_tokenizer=__UpperCAmelCase )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCAmelCase__ = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
UpperCAmelCase__ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def lowercase_ (self : Any ) -> int:
"""simple docstring"""
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCAmelCase__ = tokenizer(__UpperCAmelCase , return_tensors="tf" , padding="longest" )
UpperCAmelCase__ = tf_tokenizer(__UpperCAmelCase )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.int64 ) == tf_outputs[key] ) )
@slow
def lowercase_ (self : List[Any] ) -> List[Any]:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase__ = tf_tokenizer(self.paired_sentences )
UpperCAmelCase__ = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.int64 ) == separated_outputs[key] ) )
@slow
def lowercase_ (self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase__ = tf.function(__UpperCAmelCase )
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCAmelCase__ = tf.constant(__UpperCAmelCase )
UpperCAmelCase__ = compiled_tokenizer(__UpperCAmelCase )
UpperCAmelCase__ = tf_tokenizer(__UpperCAmelCase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def lowercase_ (self : str ) -> List[str]:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase__ = ModelToSave(tokenizer=__UpperCAmelCase )
UpperCAmelCase__ = tf.convert_to_tensor(self.test_sentences )
UpperCAmelCase__ = model(__UpperCAmelCase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCAmelCase__ = Path(__UpperCAmelCase ) / "saved.model"
model.save(__UpperCAmelCase )
UpperCAmelCase__ = tf.keras.models.load_model(__UpperCAmelCase )
UpperCAmelCase__ = loaded_model(__UpperCAmelCase )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
| 65 |
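A minimal sketch of the in-graph tokenization idea tested above (assumes `tensorflow` and `tensorflow-text` are installed): the tokenizer itself runs inside a `tf.function`, so serving graphs need no Python-side preprocessing:

import tensorflow as tf
from transformers import TFBertTokenizer

tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")

@tf.function
def tokenize(texts):
    return tf_tokenizer(texts)

out = tokenize(tf.constant(["This is a straightforward English test sentence."]))
print(out["input_ids"].shape)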
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=64 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Optional[int]:
UpperCAmelCase : List[Any] = parent
UpperCAmelCase : Optional[int] = batch_size
UpperCAmelCase : Union[str, Any] = seq_length
UpperCAmelCase : Optional[Any] = is_training
UpperCAmelCase : Dict = use_input_mask
UpperCAmelCase : str = use_token_type_ids
UpperCAmelCase : List[Any] = use_labels
UpperCAmelCase : List[Any] = vocab_size
UpperCAmelCase : Dict = hidden_size
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : Optional[int] = num_attention_heads
UpperCAmelCase : int = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : List[str] = hidden_dropout_prob
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : str = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : List[str] = type_sequence_label_size
UpperCAmelCase : int = initializer_range
UpperCAmelCase : str = num_labels
UpperCAmelCase : Optional[int] = num_choices
UpperCAmelCase : Dict = scope
UpperCAmelCase : Union[str, Any] = vocab_size - 1
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Any = None
if self.use_input_mask:
UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : List[str] = None
if self.use_labels:
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Optional[int] = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowercase( self ) -> Optional[Any]:
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase : Any = True
return config, input_ids, input_mask, token_labels
def _lowercase( self , A , A , A ) -> int:
UpperCAmelCase : str = GPTNeoXModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[str] = model(A , attention_mask=A )
UpperCAmelCase : List[str] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A ) -> Optional[int]:
UpperCAmelCase : str = True
UpperCAmelCase : Optional[Any] = GPTNeoXModel(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A ) -> List[str]:
UpperCAmelCase : Tuple = GPTNeoXForCausalLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase : str = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A ) -> Tuple:
UpperCAmelCase : List[str] = self.num_labels
UpperCAmelCase : Any = GPTNeoXForQuestionAnswering(A )
model.to(A )
model.eval()
UpperCAmelCase : str = model(A , attention_mask=A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase( self , A , A , A , A ) -> int:
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : List[str] = GPTNeoXForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase( self , A , A , A , A ) -> str:
UpperCAmelCase : List[Any] = self.num_labels
UpperCAmelCase : Tuple = GPTNeoXForTokenClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase( self , A , A , A ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : str = GPTNeoXForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
UpperCAmelCase : List[str] = model(A , attention_mask=A , use_cache=A )
UpperCAmelCase : List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : Any = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase : Dict = model(A , attention_mask=A , output_hidden_states=A )
UpperCAmelCase : Any = output_from_no_past["""hidden_states"""][0]
UpperCAmelCase : List[str] = model(
A , attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0]
# select random slice
UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase : List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs
UpperCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class GPTNeoXModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp( self ) -> Union[str, Any]:
        self.model_tester = GPTNeoXModelTester(self )
        self.config_tester = ConfigTester(self , config_class=GPTNeoXConfig , hidden_size=64 , num_attention_heads=8 )
def _lowercase( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(A , A , A )
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(A , A , A )
def _lowercase( self ) -> Optional[Any]:
# This regression test was failing with PyTorch < 1.3
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCAmelCase : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(A , A , A )
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(A , A , A )
def _lowercase( self ) -> int:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*A )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
def _lowercase( self ) -> Any:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def _lowercase( self ) -> Optional[int]:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowercase( self , A ) -> str:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : int = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Dict = GPTNeoXModel(A )
original_model.to(A )
original_model.eval()
UpperCAmelCase : List[str] = original_model(A ).last_hidden_state
UpperCAmelCase : Any = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Any = {"""type""": scaling_type, """factor""": 1_0.0}
UpperCAmelCase : str = GPTNeoXModel(A )
scaled_model.to(A )
scaled_model.eval()
UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state
UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
@require_torch
class GPTNeoXLanguageGenerationTest ( unittest.TestCase ):
@slow
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : str = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
UpperCAmelCase : int = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(A )
UpperCAmelCase : List[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(A )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
UpperCAmelCase : List[str] = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
UpperCAmelCase : Union[str, Any] = model.generate(**A , do_sample=A , max_new_tokens=20 )
UpperCAmelCase : Tuple = tokenizer.batch_decode(A )[0]
self.assertEqual(A , A )
| 265 | 0 |
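The decoder-past test in the snippet above checks that a forward pass fed only the new tokens plus `past_key_values` agrees with a full uncached pass on those positions. A minimal hedged sketch of that invariant with a public checkpoint (the model id and tolerance are illustrative choices; running it downloads weights):
import torch
from transformers import GPTNeoXForCausalLM
# Illustrative only: any small causal LM checkpoint would do here.
model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-70m").eval()
ids = torch.tensor([[1, 2, 3, 4]])
with torch.no_grad():
    out = model(ids[:, :3], use_cache=True)
    cached = model(ids[:, 3:], past_key_values=out.past_key_values)
    full = model(ids)
# Logits for the last position should match between cached and full passes.
assert torch.allclose(cached.logits[:, -1], full.logits[:, -1], atol=1e-4)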
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer ( Trainer ):
    '''simple docstring'''
    def __init__( self , config=None , data_args=None , *args , **kwargs ) -> Optional[int]:
        super().__init__(*args , **kwargs )
        if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f""" {self.model.__class__}"""
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
                """ padding.""" )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler( self , num_training_steps: int ) -> str:
        if self.optimizer is None:
            no_decay = ["""bias""", """LayerNorm.weight"""]
            optimizer_grouped_parameters = [
                {
                    """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    """weight_decay""": self.args.weight_decay,
                },
                {
                    """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    """weight_decay""": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"""scale_parameter""": False, """relative_step""": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    """betas""": (self.args.adam_beta1, self.args.adam_beta2),
                    """eps""": self.args.adam_epsilon,
                }
            optimizer_kwargs["""lr"""] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
    def _get_lr_scheduler( self , num_training_steps ) -> str:
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
        return scheduler
    def _get_train_sampler( self ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss( self , model , inputs , labels ) -> Optional[Any]:
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs , use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss, logits = model(**inputs , labels=labels , use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs , use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
            loss, _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits
    def compute_loss( self , model , inputs ) -> List[Any]:
        labels = inputs.pop("""labels""" )
        loss, _ = self._compute_loss(model , inputs , labels )
        return loss
    def prediction_step( self , model: nn.Module , inputs: Dict[str, Union[torch.Tensor, Any]] , prediction_loss_only: bool , ignore_keys: Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            """max_length""": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **gen_kwargs , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs["""max_length"""] )
        labels = inputs.pop("""labels""" )
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model , inputs , labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels , gen_kwargs["""max_length"""] )
        return (loss, logits, labels)
    def _pad_tensors_to_max_len( self , tensor , max_length ) -> int:
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
                f""" padded to `max_length`={max_length}""" )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 66 |
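The trainer above imports `label_smoothed_nll_loss` from a local `utils` module that is not shown. The sketch below is a hypothetical fairseq-style implementation matching the call signature used above (`loss_fn(lprobs, labels, epsilon, ignore_index=...)` returning a `(loss, nll_loss)` pair); it is not the repo's actual helper.
import torch

def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
    # lprobs: (batch, seq, vocab) log-probabilities; target: (batch, seq) label ids
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target.clamp(min=0))
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0)
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0)
    nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    # distribute epsilon mass uniformly over the vocabulary
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss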
'''simple docstring'''
def actual_power ( a , b ) -> int:
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
    else:
        return a * actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
def power ( a , b ) -> float:
    if b < 0:
        return 1 / actual_power(a , -b )
    return actual_power(a , b )
if __name__ == "__main__":
print(power(-2, -3))
| 265 | 0 |
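The recursive version above calls `actual_power` twice per level, which makes it exponential in the recursion depth. A sketch of the usual O(log b) fix, squaring a single half-result (the function name is chosen here for illustration):
def fast_power(a: int, b: int) -> float:
    if b < 0:
        return 1 / fast_power(a, -b)
    if b == 0:
        return 1
    half = fast_power(a, b // 2)  # compute the half-power once
    return half * half if b % 2 == 0 else a * half * half

assert fast_power(2, 10) == 1024
assert fast_power(-2, -3) == -0.125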
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor ( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__( self , image_processor , tokenizer , qformer_tokenizer ):
        """simple docstring"""
        super().__init__(image_processor , tokenizer )
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__( self , images: ImageInput = None , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_token_type_ids: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ):
        """simple docstring"""
        if images is None and text is None:
            raise ValueError('''You have to specify at least images or text.''' )
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding.update(text_encoding )
            qformer_text_encoding = self.qformer_tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding['''qformer_input_ids'''] = qformer_text_encoding.pop('''input_ids''' )
            encoding['''qformer_attention_mask'''] = qformer_text_encoding.pop('''attention_mask''' )
        if images is not None:
            image_encoding = self.image_processor(images , return_tensors=return_tensors )
            encoding.update(image_encoding )
        return encoding
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    def save_pretrained( self , save_directory , **kwargs ):
        """simple docstring"""
        if os.path.isfile(save_directory ):
            raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
        os.makedirs(save_directory , exist_ok=True )
        qformer_tokenizer_path = os.path.join(save_directory , '''qformer_tokenizer''' )
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path )
        return super().save_pretrained(save_directory , **kwargs )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path , subfolder='''qformer_tokenizer''' )
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path , **kwargs )
        args.append(qformer_tokenizer )
        return cls(*args )
| 67 |
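A hedged usage sketch for the dual-tokenizer processor above; the checkpoint name is illustrative (any InstructBLIP repo carrying a `qformer_tokenizer` subfolder should behave the same), and loading it downloads tokenizer files.
from PIL import Image
import numpy as np
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(images=image, text="What is in the picture?", return_tensors="pt")
# The batch carries both LM and Q-Former token ids alongside pixel values.
print(sorted(inputs.keys()))  # expect input_ids, qformer_input_ids, pixel_values, ...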
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp( self ) -> str:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self , tokenizer ) -> int:
        input_text = """this is a test"""
        output_text = """this is a test"""
return input_text, output_text
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = """<pad>"""
UpperCAmelCase : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def _lowercase( self ) -> Any:
UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(A ) , 30000 )
def _lowercase( self ) -> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _lowercase( self ) -> Union[str, Any]:
if not self.test_rust_tokenizer:
return
UpperCAmelCase : int = self.get_tokenizer()
UpperCAmelCase : List[str] = self.get_rust_tokenizer()
UpperCAmelCase : Optional[Any] = """I was born in 92000, and this is falsé."""
UpperCAmelCase : str = tokenizer.tokenize(A )
UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(A )
self.assertListEqual(A , A )
UpperCAmelCase : Any = tokenizer.encode(A , add_special_tokens=A )
UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase : Union[str, Any] = tokenizer.encode(A )
UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A )
self.assertListEqual(A , A )
def _lowercase( self ) -> Any:
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [48, 25, 21, 1289] )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(ids , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def _lowercase( self ) -> Optional[Any]:
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB )
        text = tokenizer.encode("""sequence builders""" )
        text_a = tokenizer.encode("""multi-sequence build""" )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def _lowercase( self ) -> Dict:
# fmt: off
UpperCAmelCase : Tuple = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
| 265 | 0 |
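A small sketch of the slow/fast parity check exercised above, run against the public checkpoint instead of the local fixture (illustrative; requires network access to download the vocab):
from transformers import AlbertTokenizer, AlbertTokenizerFast

slow = AlbertTokenizer.from_pretrained("albert-base-v2")
fast = AlbertTokenizerFast.from_pretrained("albert-base-v2")
text = "I was born in 92000, and this is falsé."
# Both tokenizers should agree token-for-token and id-for-id.
assert slow.tokenize(text) == fast.tokenize(text)
assert slow.encode(text, add_special_tokens=False) == fast.encode(text, add_special_tokens=False)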
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = """examples/"""
REPLACE_PATTERNS = {
    """examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
    """init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
    """setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
    """doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
REPLACE_FILES = {
    """init""": """src/transformers/__init__.py""",
    """setup""": """setup.py""",
}
README_FILE = """README.md"""
def update_version_in_file ( fname , version , pattern ) -> int:
    '''simple docstring'''
    with open(fname , "r" , encoding="utf-8" , newline="\n" ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION" , version )
    code = re_pattern.sub(replace , code )
    with open(fname , "w" , encoding="utf-8" , newline="\n" ) as f:
        f.write(code )
def update_version_in_examples ( version ) -> Optional[Any]:
    '''simple docstring'''
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects" )
        if "legacy" in directories:
            directories.remove("legacy" )
        for fname in fnames:
            if fname.endswith(".py" ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern="examples" )
def global_version_update ( version , patch=False ) -> Optional[Any]:
    '''simple docstring'''
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list ( ) -> List[str]:
    '''simple docstring'''
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith("1." ):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
        index += 1
    with open(README_FILE , "w" , encoding="utf-8" , newline="\n" ) as f:
        f.writelines(lines )
def get_version ( ) -> Optional[Any]:
    '''simple docstring'''
    with open(REPLACE_FILES["init"] , "r" ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work ( patch=False ) -> Optional[Any]:
    '''simple docstring'''
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
    else:
        default_version = F'{default_version.major}.{default_version.minor + 1}.0'
    # Now let's ask nicely if that's the right one.
    version = input(F'Which version are you releasing? [{default_version}]' )
    if len(version ) == 0:
        version = default_version
    print(F'Updating version to {version}.' )
    global_version_update(version , patch=patch )
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`." )
        clean_main_ref_in_model_list()
def post_release_work ( ) -> Optional[Any]:
    '''simple docstring'''
    current_version = get_version()
    dev_version = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F'Which version are we developing now? [{dev_version}]' )
    if len(version ) == 0:
        version = dev_version
    print(F'Updating version to {version}.' )
    global_version_update(version )
    print("Cleaning main README, don't forget to run `make fix-copies`." )
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
| 68 |
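The replacement patterns above can be exercised in isolation; a minimal sketch of how the `init` pattern rewrites a version string (the sample version numbers are made up):
import re

pattern = re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE)
template = "__version__ = \"VERSION\"\n"
code = "__version__ = \"4.31.0.dev0\""
new_code = pattern.sub(template.replace("VERSION", "4.31.0"), code)
assert new_code == "__version__ = \"4.31.0\"\n"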
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components( self ) -> Optional[int]:
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_zero=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ) -> Optional[Any]:
        mask = floats_tensor((1, 16, 16) , rng=random.Random(seed ) ).to(device )
        latents = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """a dog and a newt""",
            """mask_image""": mask,
            """image_latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 2,
            """inpaint_strength""": 1.0,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs
    def get_dummy_mask_inputs( self , device , seed=0 ) -> Optional[int]:
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """image""": image,
            """source_prompt""": """a cat and a frog""",
            """target_prompt""": """a dog and a newt""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """num_maps_per_mask""": 2,
            """mask_encode_strength""": 1.0,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs
    def get_dummy_inversion_inputs( self , device , seed=0 ) -> str:
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """image""": image,
            """prompt""": """a cat and a frog""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """inpaint_strength""": 1.0,
            """guidance_scale""": 6.0,
            """decode_latents""": True,
            """output_type""": """numpy""",
        }
        return inputs
def _lowercase( self ) -> List[Any]:
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
UpperCAmelCase : Dict = self.get_dummy_components()
UpperCAmelCase : int = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(A , A , A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
UpperCAmelCase : Any = self.get_dummy_inputs(A )
UpperCAmelCase : Optional[Any] = pipe(**A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(A )
UpperCAmelCase : Dict = self.pipeline_class.from_pretrained(A )
pipe_loaded.to(A )
pipe_loaded.set_progress_bar_config(disable=A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(A , A ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(A )
UpperCAmelCase : Tuple = pipe_loaded(**A )[0]
UpperCAmelCase : Optional[Any] = np.abs(output - output_loaded ).max()
self.assertLess(A , 1e-4 )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Optional[int] = """cpu"""
UpperCAmelCase : Optional[Any] = self.get_dummy_components()
UpperCAmelCase : List[str] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Union[str, Any] = self.get_dummy_mask_inputs(A )
UpperCAmelCase : List[Any] = pipe.generate_mask(**A )
UpperCAmelCase : Dict = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
UpperCAmelCase : Optional[int] = np.array([0] * 9 )
UpperCAmelCase : Union[str, Any] = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = """cpu"""
UpperCAmelCase : List[str] = self.get_dummy_components()
UpperCAmelCase : Optional[Any] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Optional[int] = self.get_dummy_inversion_inputs(A )
UpperCAmelCase : List[str] = pipe.invert(**A ).images
UpperCAmelCase : Union[str, Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase : Dict = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , )
UpperCAmelCase : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1e-3 )
def _lowercase( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def _lowercase( self ) -> int:
UpperCAmelCase : List[Any] = """cpu"""
UpperCAmelCase : int = self.get_dummy_components()
UpperCAmelCase : List[Any] = {"""beta_start""": 0.0_0_0_8_5, """beta_end""": 0.0_1_2, """beta_schedule""": """scaled_linear"""}
UpperCAmelCase : int = DPMSolverMultistepScheduler(**A )
UpperCAmelCase : int = DPMSolverMultistepInverseScheduler(**A )
UpperCAmelCase : List[str] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Tuple = self.get_dummy_inversion_inputs(A )
UpperCAmelCase : Any = pipe.invert(**A ).images
UpperCAmelCase : Dict = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase : Any = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , )
UpperCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1e-3 )
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests ( unittest.TestCase ):
    def tearDown( self ) -> int:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @classmethod
    def setUpClass( cls ) -> Dict:
        raw_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
        raw_image = raw_image.convert("""RGB""" ).resize((768, 768) )
        cls.raw_image = raw_image
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Dict = torch.manual_seed(0 )
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2-1""" , safety_checker=None , torch_dtype=torch.float16 )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config )
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Tuple = """a bowl of fruit"""
UpperCAmelCase : List[Any] = """a bowl of pears"""
UpperCAmelCase : str = pipe.generate_mask(
image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , )
UpperCAmelCase : Tuple = pipe.invert(
prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A ).latents
UpperCAmelCase : Any = pipe(
prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
UpperCAmelCase : List[str] = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2-1""" , safety_checker=None , torch_dtype=torch.float16 )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : int = """a bowl of fruit"""
UpperCAmelCase : int = """a bowl of pears"""
UpperCAmelCase : str = pipe.generate_mask(
image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , )
UpperCAmelCase : Any = pipe.invert(
prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A , num_inference_steps=25 , ).latents
UpperCAmelCase : str = pipe(
prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
UpperCAmelCase : Tuple = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 265 | 0 |
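The integration tests above walk DiffEdit's three stages in order. A condensed, hedged sketch of that call sequence (checkpoint and prompts are the ones used above; this needs a GPU, downloads weights, and assumes `raw_image` is a 768x768 PIL image already loaded):
import torch
from diffusers import StableDiffusionDiffEditPipeline, DDIMScheduler, DDIMInverseScheduler

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
# 1) mask where source and target prompts disagree, 2) invert the image, 3) inpaint.
mask = pipe.generate_mask(image=raw_image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears")
latents = pipe.invert(prompt="a bowl of fruit", image=raw_image, inpaint_strength=0.7).latents
result = pipe(prompt="a bowl of pears", mask_image=mask, image_latents=latents, inpaint_strength=0.7).images[0]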
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig ( PretrainedConfig ):
    model_type = "sew"
    def __init__( self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs, ) -> None:
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
f'but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)'
f'= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
@property
    def inputs_to_logits_ratio( self) -> Optional[Any]:
return functools.reduce(operator.mul, self.conv_stride, 1)
| 69 |
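The `inputs_to_logits_ratio` property above is just the product of the conv strides; with the default strides that gives the downsampling factor from raw audio samples to encoder frames. A quick check:
import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)  # defaults from the config above
ratio = functools.reduce(operator.mul, conv_stride, 1)
assert ratio == 320  # one encoder frame per 320 input samples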
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def __lowerCamelCase ( _lowercase ) -> Union[str, Any]:
UpperCAmelCase : Dict = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(_lowercase , _lowercase )
def __lowerCamelCase ( _lowercase ) -> Tuple:
UpperCAmelCase , UpperCAmelCase : List[str] = emb.weight.shape
UpperCAmelCase : Dict = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
UpperCAmelCase : Optional[Any] = emb.weight.data
return lin_layer
def __lowerCamelCase ( _lowercase , _lowercase=None ) -> List[Any]:
UpperCAmelCase : Dict = {}
for old_key in state_dict.keys():
UpperCAmelCase : str = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
UpperCAmelCase : int = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' )
else:
UpperCAmelCase : str = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" )
if "gate" in key:
UpperCAmelCase : str = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" )
if "fc2" and "experts" not in key:
UpperCAmelCase : Tuple = key.replace(""".fc2.""" , """.ffn.fc2.""" )
if "fc1" and "experts" not in key:
UpperCAmelCase : List[Any] = key.replace(""".fc1.""" , """.ffn.fc1.""" )
if ".encoder_attn." in key:
UpperCAmelCase : Union[str, Any] = key.replace(""".encoder_attn.""" , """.cross_attention.""" )
if "encoder_attn_layer_norm" in key:
UpperCAmelCase : str = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" )
if "final_layer_norm" in key:
UpperCAmelCase : Optional[int] = key.replace("""final_layer_norm""" , """ff_layer_norm""" )
UpperCAmelCase : str = state_dict[old_key]
return new_dict
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = WEIGHTS_NAME ) -> Tuple:
UpperCAmelCase : Any = []
UpperCAmelCase : Dict = 0
os.makedirs(_lowercase , exist_ok=_lowercase )
for expert in range(_lowercase ):
UpperCAmelCase : str = switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(_lowercase ):
UpperCAmelCase : Optional[Any] = torch.load(_lowercase )["""model"""]
remove_ignore_keys_(_lowercase )
UpperCAmelCase : List[str] = rename_fairseq_keys(_lowercase , _lowercase )
UpperCAmelCase : Optional[Any] = os.path.join(
_lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase )+1:05d}-of-???.bin''' ) )
torch.save(_lowercase , _lowercase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_lowercase )[0]].dtype )
# Add the last block
UpperCAmelCase : Optional[Any] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase )+1:05d}-of-???.bin''' ) )
UpperCAmelCase : Optional[int] = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
remove_ignore_keys_(_lowercase )
UpperCAmelCase : Optional[Any] = rename_fairseq_keys(_lowercase , _lowercase )
UpperCAmelCase : int = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_lowercase ) == 1:
UpperCAmelCase : Dict = os.path.join(_lowercase , _lowercase )
torch.save(_lowercase , _lowercase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_lowercase , _lowercase )
# Otherwise, let's build the index
UpperCAmelCase : Optional[int] = {}
for idx, shard in enumerate(_lowercase ):
UpperCAmelCase : List[str] = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_lowercase ):05d}.bin''' )
UpperCAmelCase : List[str] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(_lowercase , os.path.join(_lowercase , _lowercase ) )
for key in shard:
UpperCAmelCase : Tuple = shard_file
# Add the metadata
UpperCAmelCase : Any = {"""total_size""": total_size}
UpperCAmelCase : Optional[Any] = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(_lowercase , _lowercase ) , """w""" , encoding="""utf-8""" ) as f:
UpperCAmelCase : str = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + """\n"""
f.write(_lowercase )
return metadata, index
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
a : int = parser.parse_args()
a , a : Any = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_2_8,
args.dtype,
)
a : str = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8
)
config.save_pretrained(args.pytorch_dump_folder_path)
a : Optional[int] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
| 265 | 0 |
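The conversion script above finishes by writing a shard index mapping each parameter name to the shard file that holds it, plus a total-size metadata entry. A toy sketch of the structure it produces (the keys and size below are made up for illustration):
import json

weight_map = {
    "ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00002.bin",
    "decoder.embed_tokens.weight": "pytorch_model-00002-of-00002.bin",
}
index = {"metadata": {"total_size": 123456}, "weight_map": weight_map}
print(json.dumps(index, indent=2, sort_keys=True))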
'''simple docstring'''
def bin_to_octal ( bin_string ):
    """simple docstring"""
    if not all(char in """01""" for char in bin_string ):
        raise ValueError("""Non-binary value was passed to the function""" )
    if not bin_string:
        raise ValueError("""Empty string was passed to the function""" )
    oct_string = """"""
    while len(bin_string ) % 3 != 0:
        bin_string = """0""" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string ) )
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group ):
            oct_val += int(2 ** (2 - index) * int(val ) )
        oct_string += str(oct_val )
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
| 70 |
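The conversion can be cross-checked against Python's built-ins; a quick sketch:
def reference_bin_to_octal(bin_string: str) -> str:
    # oct() prefixes its output with "0o"; strip it for a plain digit string.
    return oct(int(bin_string, 2))[2:]

assert reference_bin_to_octal("1111") == "17"
assert reference_bin_to_octal("101010101") == "525"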
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : List[Any] = logging.get_logger(__name__)
a : Union[str, Any] = torch.device("""cpu""")
def prepare_img ( ) -> Any:
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
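# The two helpers above follow the usual two-step state-dict migration: compute the
# full list of (old, new) key pairs first, then pop/re-insert each entry in place.
# A tiny illustration on a made-up state dict (the key is invented for the example):
#
#     fake_state_dict = {"patch_embed.0.weight": torch.zeros(1)}
#     for src, dest in create_rename_keys(fake_state_dict):
#         rename_key(fake_state_dict, src, dest)
#     assert list(fake_state_dict) == ["swiftformer.patch_embed.patch_embedding.0.weight"]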
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
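# Conversion scripts validate numerics before saving: run a fixed image through the
# ported model and compare a slice of logits against values recorded from the source
# implementation. A generic form of that guard (a sketch; the expected values are
# whatever the reference run produced):
def check_conversion(model, pixel_values, expected_slice, atol=1e-3):
    # Forward once and compare the first few logits against the reference values.
    logits = model(pixel_values).logits
    if not torch.allclose(logits[0, : expected_slice.numel()], expected_slice, atol=atol):
        raise ValueError("Converted model diverges from the reference outputs.")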
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 265 | 0 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
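    # Illustrative post-conversion check (assumes ./DialoGPT-small exists): the
    # LM-head key must have been renamed in each saved file, e.g.
    #     sd = torch.load(os.path.join("./DialoGPT-small", WEIGHTS_NAME))
    #     assert NEW_KEY in sd and OLD_KEY not in sd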
| 71 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
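# A simplified sketch of the batch-size-halving strategy the tests above pin down
# (not accelerate's actual implementation, which also frees CUDA caches between
# attempts and recognizes more out-of-memory error types):
import functools

def find_executable_batch_size_sketch(starting_batch_size=128):
    def decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while True:
                if batch_size == 0:
                    raise RuntimeError("No executable batch size found, reached zero.")
                try:
                    return function(batch_size, *args, **kwargs)
                except RuntimeError as error:
                    if "out of memory" in str(error):
                        batch_size //= 2  # halve and retry
                    else:
                        raise
        return wrapper
    return decorator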
| 265 | 0 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class __snake_case :
def __init__( self : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Any=3 , __lowerCAmelCase : List[Any]=7 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Optional[Any]=9_9 , __lowerCAmelCase : List[Any]=3_2 , __lowerCAmelCase : Any=5 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : Union[str, Any]=3_7 , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Optional[int]=5_1_2 , __lowerCAmelCase : List[Any]=1_6 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : List[Any]=0.02 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : int=None , ):
"""simple docstring"""
_lowerCamelCase : Dict = parent
_lowerCamelCase : Union[str, Any] = batch_size
_lowerCamelCase : Union[str, Any] = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Union[str, Any] = use_input_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : Any = use_labels
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Optional[int] = hidden_size
_lowerCamelCase : int = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : List[Any] = intermediate_size
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : List[Any] = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : Any = max_position_embeddings
_lowerCamelCase : Union[str, Any] = type_vocab_size
_lowerCamelCase : Tuple = type_sequence_label_size
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : Any = num_labels
_lowerCamelCase : List[Any] = num_choices
_lowerCamelCase : Union[str, Any] = scope
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : Optional[Any] = None
if self.use_input_mask:
_lowerCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[str] = None
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : Dict = None
_lowerCamelCase : Any = None
if self.use_labels:
_lowerCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase : int = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__lowerCAmelCase , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = FalconModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
_lowerCamelCase : int = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Any = FalconModel(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : List[str] = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , )
_lowerCamelCase : Optional[int] = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , )
_lowerCamelCase : int = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , ):
"""simple docstring"""
_lowerCamelCase : List[Any] = FalconForCausalLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : str = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = FalconForCausalLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
# first forward pass
_lowerCamelCase : Optional[Any] = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , use_cache=__lowerCAmelCase , )
_lowerCamelCase : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_lowerCamelCase : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCamelCase : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_lowerCamelCase : int = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCamelCase : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
_lowerCamelCase : str = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , )['''hidden_states'''][0]
_lowerCamelCase : str = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , )['''hidden_states'''][0]
# select random slice
_lowerCamelCase : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCamelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCamelCase : List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class __snake_case ( _lowercase , _lowercase , _lowercase , unittest.TestCase):
snake_case__ : Tuple = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
snake_case__ : Tuple = (FalconForCausalLM,) if is_torch_available() else ()
snake_case__ : Optional[Any] = (
{
"feature-extraction": FalconModel,
"text-classification": FalconForSequenceClassification,
"text-generation": FalconForCausalLM,
"question-answering": FalconForQuestionAnswering,
"token-classification": FalconForTokenClassification,
"zero-shot": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : Optional[Any] = False
snake_case__ : int = False
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Tuple = FalconModelTester(self )
_lowerCamelCase : List[str] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase , *_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
_lowerCamelCase : Dict = alibi
self.model_tester.create_and_check_model(__lowerCAmelCase , *__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = 3
_lowerCamelCase : Dict = input_dict['''input_ids''']
_lowerCamelCase : Dict = input_ids.ne(1 ).to(__lowerCAmelCase )
_lowerCamelCase : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_lowerCamelCase : List[Any] = FalconForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Any = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Dict = 3
_lowerCamelCase : Any = '''single_label_classification'''
_lowerCamelCase : Tuple = input_dict['''input_ids''']
_lowerCamelCase : Tuple = input_ids.ne(1 ).to(__lowerCAmelCase )
_lowerCamelCase : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_lowerCamelCase : Optional[int] = FalconForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : List[Any] = input_dict['''input_ids''']
_lowerCamelCase : Dict = FalconForCausalLM(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : str = model(__lowerCAmelCase , use_cache=__lowerCAmelCase )
_lowerCamelCase : List[str] = input_ids.shape[0]
_lowerCamelCase : Optional[int] = model._convert_to_rw_cache(result.past_key_values )
_lowerCamelCase : List[Any] = model._convert_cache_to_standard_format(__lowerCAmelCase , __lowerCAmelCase )
for layer in range(len(__lowerCAmelCase ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : str = 3
_lowerCamelCase : List[str] = '''multi_label_classification'''
_lowerCamelCase : Tuple = input_dict['''input_ids''']
_lowerCamelCase : str = input_ids.ne(1 ).to(__lowerCAmelCase )
_lowerCamelCase : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_lowerCamelCase : str = FalconForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Optional[int] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
for model_class in self.all_generative_model_classes:
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(__lowerCAmelCase , '''use_cache''' ):
return
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase ).to(__lowerCAmelCase )
if "use_cache" not in inputs:
_lowerCamelCase : List[str] = True
_lowerCamelCase : Optional[int] = model(**__lowerCAmelCase )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
_lowerCamelCase : Dict = (
getattr(__lowerCAmelCase , '''decoder_layers''' , __lowerCAmelCase )
or getattr(__lowerCAmelCase , '''num_decoder_layers''' , __lowerCAmelCase )
or config.num_hidden_layers
)
_lowerCamelCase : Any = getattr(__lowerCAmelCase , '''num_kv_heads''' , config.num_attention_heads )
_lowerCamelCase : int = getattr(__lowerCAmelCase , '''d_model''' , config.hidden_size )
_lowerCamelCase : Any = embed_dim // num_attention_heads
_lowerCamelCase : Tuple = outputs['''past_key_values''']
self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : Any = inputs['''input_ids'''].shape
for i in range(__lowerCAmelCase ):
if config.new_decoder_architecture:
_lowerCamelCase : Optional[int] = config.num_attention_heads
elif config.multi_query:
_lowerCamelCase : List[Any] = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class __snake_case ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
_lowerCamelCase : List[Any] = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
model.eval()
model.to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = (
'''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
)
_lowerCamelCase : Tuple = model.generate(**__lowerCAmelCase , do_sample=__lowerCAmelCase , max_new_tokens=1_9 )
_lowerCamelCase : Optional[Any] = tokenizer.batch_decode(__lowerCAmelCase )[0]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
_lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : int = FalconForCausalLM.from_pretrained(__lowerCAmelCase )
model.eval()
model.to(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__lowerCAmelCase )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**__lowerCAmelCase , do_sample=__lowerCAmelCase , max_new_tokens=4 )
model.generate(**__lowerCAmelCase , do_sample=__lowerCAmelCase , max_new_tokens=4 )
model.generate(**__lowerCAmelCase , num_beams=2 , max_new_tokens=4 )
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = FalconForCausalLM.from_pretrained(__lowerCAmelCase )
model.eval()
model.to(device=__lowerCAmelCase )
_lowerCamelCase : str = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__lowerCAmelCase )
# Test results are the same with and without cache
_lowerCamelCase : Dict = model.generate(**__lowerCAmelCase , do_sample=__lowerCAmelCase , max_new_tokens=2_0 , use_cache=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = model.generate(**__lowerCAmelCase , do_sample=__lowerCAmelCase , max_new_tokens=2_0 , use_cache=__lowerCAmelCase )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
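# Stand-alone version of the cache-equivalence check above: greedy decoding must be
# identical with and without the key/value cache. Any causal LM works; the tiny GPT-2
# checkpoint here is just a convenient small model, not one used by these tests.
#
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#     tok = AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2")
#     lm = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2").eval()
#     enc = tok("My favorite food is", return_tensors="pt")
#     with_cache = lm.generate(**enc, do_sample=False, max_new_tokens=10, use_cache=True)
#     no_cache = lm.generate(**enc, do_sample=False, max_new_tokens=10, use_cache=False)
#     assert (with_cache == no_cache).all()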
| 72 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    """configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
    """processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_git"""] = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
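# The guarded imports above follow the lazy-module convention: names are only listed
# in _import_structure up front, and the real submodule import happens on first
# attribute access. A minimal sketch of the idea (not the actual _LazyModule class):
#
#     import importlib, sys, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for submodule, names in self._import_structure.items():
#                 if attr in names:
#                     module = importlib.import_module(f".{submodule}", self.__name__)
#                     return getattr(module, attr)
#             raise AttributeError(attr)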
| 265 | 0 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a =logging.get_logger(__name__)
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Any = ['''pixel_values''']
def __init__( self : Tuple ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Dict[str, int] = None ,SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Dict[str, int] = None ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 2_5_5 ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD ,**SCREAMING_SNAKE_CASE__ : int ,):
super().__init__(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = size if size is not None else {'shortest_edge': 2_2_4}
__lowerCamelCase : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ ,default_to_square=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
__lowerCamelCase : str = get_size_dict(SCREAMING_SNAKE_CASE__ ,param_name='crop_size')
__lowerCamelCase : Any = do_resize
__lowerCamelCase : Optional[Any] = size
__lowerCamelCase : List[Any] = resample
__lowerCamelCase : Optional[int] = do_center_crop
__lowerCamelCase : Optional[int] = crop_size
__lowerCamelCase : List[str] = do_rescale
__lowerCamelCase : Optional[Any] = rescale_factor
__lowerCamelCase : Dict = do_normalize
__lowerCamelCase : int = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__lowerCamelCase : Union[str, Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Dict[str, int] ,SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : List[Any] ,):
__lowerCamelCase : Any = get_size_dict(SCREAMING_SNAKE_CASE__ ,default_to_square=SCREAMING_SNAKE_CASE__)
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__lowerCamelCase : List[str] = int((2_5_6 / 2_2_4) * size['shortest_edge'])
__lowerCamelCase : Union[str, Any] = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ ,size=SCREAMING_SNAKE_CASE__ ,default_to_square=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}")
return resize(
SCREAMING_SNAKE_CASE__ ,size=(size_dict['height'], size_dict['width']) ,resample=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Dict[str, int] ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : List[str] ,):
__lowerCamelCase : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__)
if "height" not in size or "width" not in size:
raise ValueError(F"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
return center_crop(SCREAMING_SNAKE_CASE__ ,size=(size['height'], size['width']) ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Union[int, float] ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : Dict ,):
return rescale(SCREAMING_SNAKE_CASE__ ,scale=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Union[float, List[float]] ,SCREAMING_SNAKE_CASE__ : Union[float, List[float]] ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ,):
return normalize(SCREAMING_SNAKE_CASE__ ,mean=SCREAMING_SNAKE_CASE__ ,std=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : ImageInput ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[Dict[str, int]] = None ,SCREAMING_SNAKE_CASE__ : PILImageResampling = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[Dict[str, int]] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[float] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, Iterable[float]]] = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, Iterable[float]]] = None ,SCREAMING_SNAKE_CASE__ : Optional[TensorType] = None ,SCREAMING_SNAKE_CASE__ : ChannelDimension = ChannelDimension.FIRST ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ,):
__lowerCamelCase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
__lowerCamelCase : Tuple = resample if resample is not None else self.resample
__lowerCamelCase : str = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCamelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
__lowerCamelCase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCamelCase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
__lowerCamelCase : Dict = image_mean if image_mean is not None else self.image_mean
__lowerCamelCase : Dict = image_std if image_std is not None else self.image_std
__lowerCamelCase : List[Any] = size if size is not None else self.size
__lowerCamelCase : Any = get_size_dict(SCREAMING_SNAKE_CASE__ ,default_to_square=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
__lowerCamelCase : Any = get_size_dict(SCREAMING_SNAKE_CASE__ ,param_name='crop_size')
__lowerCamelCase : Optional[Any] = make_list_of_images(SCREAMING_SNAKE_CASE__)
if not valid_images(SCREAMING_SNAKE_CASE__):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
__lowerCamelCase : str = [to_numpy_array(SCREAMING_SNAKE_CASE__) for image in images]
if do_resize:
__lowerCamelCase : Any = [self.resize(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) for image in images]
if do_center_crop:
__lowerCamelCase : Union[str, Any] = [self.center_crop(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) for image in images]
if do_rescale:
__lowerCamelCase : Any = [self.rescale(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) for image in images]
if do_normalize:
__lowerCamelCase : List[Any] = [self.normalize(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) for image in images]
__lowerCamelCase : Optional[Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) for image in images]
__lowerCamelCase : Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE__ ,tensor_type=SCREAMING_SNAKE_CASE__)
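# The resize step above reproduces the classic ImageNet evaluation transform: scale
# the shorter side to int(256 / 224 * crop) while keeping the aspect ratio, then take
# a centered crop. A self-contained PIL sketch of the same geometry (the helper name
# and the direct use of PIL are illustrative, not part of this processor):
def eval_resize_then_crop(image, crop=224):
    from PIL import Image

    short = int((256 / 224) * crop)  # 256 when crop == 224
    w, h = image.size
    scale = short / min(w, h)
    image = image.resize((round(w * scale), round(h * scale)), Image.BICUBIC)
    w, h = image.size
    left, top = (w - crop) // 2, (h - crop) // 2
    return image.crop((left, top, left + crop, top + crop))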
| 73 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = LongformerTokenizer
lowercase = True
lowercase = LongformerTokenizerFast
lowercase = True
def _lowercase( self ) -> List[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase : List[str] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
UpperCAmelCase : int = dict(zip(A , range(len(A ) ) ) )
UpperCAmelCase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(A ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(A ) )
def _lowercase( self , **A ) -> Optional[Any]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , **A ) -> int:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = """lower newer"""
UpperCAmelCase : Optional[int] = """lower newer"""
return input_text, output_text
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Tuple = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase : Dict = """lower newer"""
UpperCAmelCase : int = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
UpperCAmelCase : Tuple = tokenizer.tokenize(A ) # , add_prefix_space=True)
self.assertListEqual(A , A )
UpperCAmelCase : Any = tokens + [tokenizer.unk_token]
UpperCAmelCase : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : str = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=A ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=A ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
UpperCAmelCase : List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=A )
UpperCAmelCase : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A )
UpperCAmelCase : List[str] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=A , add_prefix_space=A )
UpperCAmelCase : List[str] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=A , add_prefix_space=A )
UpperCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A )
UpperCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(A , A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : str = self.get_tokenizer()
UpperCAmelCase : List[Any] = """Encode this sequence."""
UpperCAmelCase : List[str] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
UpperCAmelCase : Union[str, Any] = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A )
UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A , A )
UpperCAmelCase : Tuple = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A )
UpperCAmelCase : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A , A )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
UpperCAmelCase : int = tokenizer.encode(A , add_special_tokens=A )
UpperCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A , A )
# Testing spaces after special tokens
UpperCAmelCase : Union[str, Any] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(A , lstrip=A , rstrip=A )} ) # mask token has a left space
UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A )
UpperCAmelCase : Union[str, Any] = """Encode <mask> sequence"""
UpperCAmelCase : Union[str, Any] = """Encode <mask>sequence"""
UpperCAmelCase : Union[str, Any] = tokenizer.encode(A )
UpperCAmelCase : Union[str, Any] = encoded.index(A )
UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A , A )
UpperCAmelCase : Tuple = tokenizer.encode(A )
UpperCAmelCase : Optional[int] = encoded.index(A )
UpperCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A , A )
def _lowercase( self ) -> Optional[int]:
pass
def _lowercase( self ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : int = self.tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : Dict = """A, <mask> AllenNLP sentence."""
UpperCAmelCase : Dict = tokenizer_r.encode_plus(A , add_special_tokens=A , return_token_type_ids=A )
UpperCAmelCase : Tuple = tokenizer_p.encode_plus(A , add_special_tokens=A , return_token_type_ids=A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
UpperCAmelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
UpperCAmelCase : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def _lowercase( self ) -> List[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , A )
self.assertEqual(post_processor_state["""add_prefix_space"""] , A )
self.assertEqual(post_processor_state["""trim_offsets"""] , A )
def _lowercase( self ) -> Optional[Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCAmelCase : int = f'''{text_of_1_token} {text_of_1_token}'''
UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , )
UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : Dict = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , )
UpperCAmelCase : int = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : List[Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , )
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , )
UpperCAmelCase : Optional[Any] = f''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )) , )
UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : Union[str, Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : Optional[int] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
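# The offset assertions above all reduce to one rule: add_prefix_space and
# trim_offsets decide whether a token's reported span includes the leading space.
# This is easy to inspect directly (real checkpoint; exact spans depend on the flags):
#
#     from transformers import LongformerTokenizerFast
#     tok = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096")
#     enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
#     print(enc.offset_mapping)  # e.g. [(0, 5), (6, 11)] with trim_offsets=True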
| 265 | 0 |
"""simple docstring"""
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    # Accepts 07x, 94 7x, +94 7x, or 0094 7x mobile numbers, with an optional
    # space or hyphen before the final seven digits.
    pattern = re.compile(
        r'^(?:0|94|\+94|0{2}94)' r'7(0|1|2|4|5|6|7|8)' r'(-| |)' r'\d{7}$' )
    return bool(re.search(pattern, phone))
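# Illustrative inputs (numbers are synthetic):
#
#     assert is_sri_lankan_phone_number("0094702343221")
#     assert is_sri_lankan_phone_number("+9471-2345678")
#     assert not is_sri_lankan_phone_number("0112345678")  # not an 07x mobile prefix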
if __name__ == "__main__":
    phone = '''0094702343221'''
    print(is_sri_lankan_phone_number(phone))
| 74 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
a : Tuple = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class UpperCamelCase_ ( unittest.TestCase ):
lowercase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowercase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowercase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowercase = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def _lowercase( self , A , A , A ) -> Dict:
UpperCAmelCase : Union[str, Any] = ZeroShotClassificationPipeline(
model=A , tokenizer=A , candidate_labels=["""polics""", """health"""] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def _lowercase( self , A , A ) -> Optional[int]:
UpperCAmelCase : Dict = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" )
self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} )
# No kwarg
UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , ["""politics"""] )
self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} )
UpperCAmelCase : str = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] )
self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} )
UpperCAmelCase : List[Any] = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" )
self.assertEqual(
A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] )
self.assertEqual(
A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
UpperCAmelCase : Dict = classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" )
self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} )
# https://github.com/huggingface/transformers/issues/13846
UpperCAmelCase : str = classifier(["""I am happy"""] , ["""positive""", """negative"""] )
self.assertEqual(
A , [
{"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]}
for i in range(1 )
] , )
UpperCAmelCase : List[str] = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] )
self.assertEqual(
A , [
{"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]}
for i in range(2 )
] , )
with self.assertRaises(A ):
classifier("""""" , candidate_labels="""politics""" )
with self.assertRaises(A ):
classifier(A , candidate_labels="""politics""" )
with self.assertRaises(A ):
classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" )
with self.assertRaises(A ):
classifier("""Who are you voting for in 2020?""" , candidate_labels=A )
with self.assertRaises(A ):
classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , )
with self.assertRaises(A ):
classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=A , )
self.run_entailment_id(A )
def _lowercase( self , A ) -> Any:
UpperCAmelCase : Tuple = zero_shot_classifier.model.config
UpperCAmelCase : Union[str, Any] = config.labelaid
UpperCAmelCase : Tuple = zero_shot_classifier.entailment_id
UpperCAmelCase : Any = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
UpperCAmelCase : Optional[Any] = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
UpperCAmelCase : Any = {"""ENTAIL""": 0, """NON-ENTAIL""": 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
UpperCAmelCase : List[str] = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
UpperCAmelCase : Tuple = original_labelaid
self.assertEqual(A , zero_shot_classifier.entailment_id )
@require_torch
def _lowercase( self ) -> str:
UpperCAmelCase : int = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"""Who are you voting for in 2020?""" * 100 , candidate_labels=["""politics""", """public health""", """science"""] )
@require_torch
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
UpperCAmelCase : Union[str, Any] = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , )
UpperCAmelCase : List[Any] = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Optional[int] = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" )
UpperCAmelCase : Optional[int] = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
UpperCAmelCase : str = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def _lowercase( self ) -> List[str]:
UpperCAmelCase : int = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" )
UpperCAmelCase : Tuple = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
UpperCAmelCase : Any = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , )
self.assertEqual(
nested_simplify(A ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
| 265 | 0 |
'''simple docstring'''
def a_ ( __snake_case : int | float | str ) -> tuple[int, int]:
"""simple docstring"""
try:
lowerCamelCase_ =float(__snake_case )
except ValueError:
raise ValueError('''Please enter a valid number''' )
lowerCamelCase_ =decimal - int(__snake_case )
if fractional_part == 0:
return int(__snake_case ), 1
else:
lowerCamelCase_ =len(str(__snake_case ).split('''.''' )[1] )
lowerCamelCase_ =int(decimal * (10**number_of_frac_digits) )
lowerCamelCase_ =10**number_of_frac_digits
lowerCamelCase_, lowerCamelCase_ =denominator, numerator
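        # Euclidean algorithm: iterate remainders until zero; the last nonzero
        # remainder (left in divisor) is the GCD used to reduce the fraction.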
while True:
lowerCamelCase_ =dividend % divisor
if remainder == 0:
break
lowerCamelCase_, lowerCamelCase_ =divisor, remainder
lowerCamelCase_, lowerCamelCase_ =numerator / divisor, denominator / divisor
    return int(numerator ), int(denominator )
if __name__ == "__main__":
print(F"""{decimal_to_fraction(2) = }""")
print(F"""{decimal_to_fraction(89.0) = }""")
print(F"""{decimal_to_fraction("67") = }""")
print(F"""{decimal_to_fraction("45.0") = }""")
print(F"""{decimal_to_fraction(1.5) = }""")
print(F"""{decimal_to_fraction("6.25") = }""")
print(F"""{decimal_to_fraction("78td") = }""")
| 75 |
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
a : List[Any] = """__DUMMY_TRANSFORMERS_USER__"""
a : Tuple = """Dummy User"""
a : Optional[Any] = """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"""
a : Optional[Any] = """https://hub-ci.huggingface.co"""
a : List[Any] = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
a : Tuple = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}"""
a : str = Path("""~/.huggingface/hub_ci_token""").expanduser()
@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> Optional[int]:
monkeypatch.setattr(
"""huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , _lowercase )
@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> List[str]:
monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , _lowercase )
monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , _lowercase )
@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> Any:
monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , _lowercase )
@pytest.fixture
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[Any]:
HfFolder.save_token(_lowercase )
yield
HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( ) -> str:
return HfApi(endpoint=_lowercase )
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase ) -> Union[str, Any]:
UpperCAmelCase : str = HfFolder.get_token()
HfFolder.save_token(_lowercase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(_lowercase )
@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> Any:
def _cleanup_repo(_lowercase ):
hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" )
return _cleanup_repo
@pytest.fixture
def __lowerCamelCase ( _lowercase ) -> List[str]:
@contextmanager
def _temporary_repo(_lowercase ):
try:
yield repo_id
finally:
cleanup_repo(_lowercase )
return _temporary_repo
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]:
UpperCAmelCase : str = F'''repo_txt_data-{int(time.time() * 10e3 )}'''
UpperCAmelCase : List[Any] = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase )
hf_api.upload_file(
token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data/text_data.txt""" , repo_id=_lowercase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]:
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]:
UpperCAmelCase : Optional[int] = F'''repo_zipped_txt_data-{int(time.time() * 10e3 )}'''
UpperCAmelCase : Optional[int] = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase )
hf_api.upload_file(
token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data.zip""" , repo_id=_lowercase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]:
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Tuple:
UpperCAmelCase : List[Any] = F'''repo_zipped_img_data-{int(time.time() * 10e3 )}'''
UpperCAmelCase : List[str] = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase )
hf_api.upload_file(
token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data.zip""" , repo_id=_lowercase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]:
return hf_private_dataset_repo_zipped_img_data_
| 265 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ ='vit_msn'
def __init__( self : str , a : Tuple=768 , a : Tuple=12 , a : Any=12 , a : int=3072 , a : List[Any]="gelu" , a : Dict=0.0 , a : int=0.0 , a : str=0.02 , a : List[str]=1e-06 , a : List[Any]=224 , a : Union[str, Any]=16 , a : Union[str, Any]=3 , a : Tuple=True , **a : Dict , ) -> List[Any]:
"""simple docstring"""
super().__init__(**a )
SCREAMING_SNAKE_CASE : Dict = hidden_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : int = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : int = layer_norm_eps
SCREAMING_SNAKE_CASE : Dict = image_size
SCREAMING_SNAKE_CASE : Tuple = patch_size
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
        SCREAMING_SNAKE_CASE : List[str] = qkv_bias
| 76 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(__magic_name__ )
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , **A ) -> List[str]:
super().__init__(**A )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , A , **A ) -> Optional[Any]:
return super().__call__(A , **A )
def _lowercase( self , **A ) -> Optional[Any]:
UpperCAmelCase : List[Any] = {}
if "candidate_labels" in kwargs:
UpperCAmelCase : Dict = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
UpperCAmelCase : Optional[Any] = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def _lowercase( self , A , A=None , A="This is a photo of {}." ) -> Optional[Any]:
UpperCAmelCase : int = load_image(A )
UpperCAmelCase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
UpperCAmelCase : List[str] = candidate_labels
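        # Turn each candidate label into a full-sentence prompt via the hypothesis
        # template, e.g. "This is a photo of a cat."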
UpperCAmelCase : Tuple = [hypothesis_template.format(A ) for x in candidate_labels]
UpperCAmelCase : Union[str, Any] = self.tokenizer(A , return_tensors=self.framework , padding=A )
UpperCAmelCase : Union[str, Any] = [text_inputs]
return inputs
def _lowercase( self , A ) -> Optional[int]:
UpperCAmelCase : List[Any] = model_inputs.pop("""candidate_labels""" )
UpperCAmelCase : Optional[Any] = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , A ):
UpperCAmelCase : Optional[Any] = text_inputs[0]
else:
# Batching case.
UpperCAmelCase : Any = text_inputs[0][0]
UpperCAmelCase : Dict = self.model(**A , **A )
UpperCAmelCase : List[Any] = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
def _lowercase( self , A ) -> Union[str, Any]:
UpperCAmelCase : int = model_outputs.pop("""candidate_labels""" )
UpperCAmelCase : int = model_outputs["""logits"""][0]
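        # A softmax over the per-label similarity logits turns them into classification scores.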
if self.framework == "pt":
UpperCAmelCase : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 )
UpperCAmelCase : Any = probs.tolist()
if not isinstance(A , A ):
UpperCAmelCase : Any = [scores]
elif self.framework == "tf":
UpperCAmelCase : List[str] = stable_softmax(A , axis=-1 )
UpperCAmelCase : Union[str, Any] = probs.numpy().tolist()
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
UpperCAmelCase : Any = [
{"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
]
return result
| 265 | 0 |
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def a_ ( _lowerCAmelCase : jnp.ndarray , _lowerCAmelCase : int , _lowerCAmelCase : float = 1 , _lowerCAmelCase : float = 1 , _lowerCAmelCase : float = 1.0E4 , _lowerCAmelCase : bool = False , _lowerCAmelCase : float = 1.0 , ):
'''simple docstring'''
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, f"""Embedding dimension {embedding_dim} should be even"""
lowercase__ : Optional[Any] = float(embedding_dim // 2 )
lowercase__ : Optional[Any] = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
lowercase__ : Any = min_timescale * jnp.exp(jnp.arange(_lowerCAmelCase , dtype=jnp.floataa ) * -log_timescale_increment )
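    # Outer product: one row of phase angles per timestep, one column per frequency.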
lowercase__ : Dict = jnp.expand_dims(_lowerCAmelCase , 1 ) * jnp.expand_dims(_lowerCAmelCase , 0 )
# scale embeddings
lowercase__ : List[str] = scale * emb
if flip_sin_to_cos:
lowercase__ : Dict = jnp.concatenate([jnp.cos(_lowerCAmelCase ), jnp.sin(_lowerCAmelCase )] , axis=1 )
else:
lowercase__ : Optional[int] = jnp.concatenate([jnp.sin(_lowerCAmelCase ), jnp.cos(_lowerCAmelCase )] , axis=1 )
lowercase__ : List[Any] = jnp.reshape(_lowerCAmelCase , [jnp.shape(_lowerCAmelCase )[0], embedding_dim] )
return signal
class UpperCAmelCase_ ( nn.Module):
lowerCamelCase__ : int = 3_2
lowerCamelCase__ : jnp.dtype = jnp.floataa
@nn.compact
def __call__( self , a ) -> Any:
lowercase__ : str = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_1' )(a )
lowercase__ : Union[str, Any] = nn.silu(a )
lowercase__ : Optional[Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_2' )(a )
return temb
class UpperCAmelCase_ ( nn.Module):
lowerCamelCase__ : int = 3_2
lowerCamelCase__ : bool = False
lowerCamelCase__ : float = 1
@nn.compact
def __call__( self , a ) -> str:
return get_sinusoidal_embeddings(
a , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 77 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __lowerCamelCase ( _lowercase ) -> Optional[Any]:
return getitem, k
def __lowerCamelCase ( _lowercase , _lowercase ) -> List[str]:
return setitem, k, v
def __lowerCamelCase ( _lowercase ) -> int:
return delitem, k
def __lowerCamelCase ( _lowercase , _lowercase , *_lowercase ) -> Optional[Any]:
try:
return fun(_lowercase , *_lowercase ), None
except Exception as e:
return None, e
a : List[str] = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
a : List[Any] = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
a : int = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
a : List[Any] = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
a : Tuple = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
a : Optional[Any] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def __lowerCamelCase ( _lowercase ) -> Optional[int]:
UpperCAmelCase : List[str] = HashMap(initial_block_size=4 )
UpperCAmelCase : Dict = {}
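    # Replay each operation on both the custom HashMap and a plain dict,
    # asserting after every step that the two remain observably identical.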
for _, (fun, *args) in enumerate(_lowercase ):
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = _run_operation(_lowercase , _lowercase , *_lowercase )
UpperCAmelCase , UpperCAmelCase : Any = _run_operation(_lowercase , _lowercase , *_lowercase )
assert my_res == py_res
assert str(_lowercase ) == str(_lowercase )
assert set(_lowercase ) == set(_lowercase )
assert len(_lowercase ) == len(_lowercase )
assert set(my.items() ) == set(py.items() )
def __lowerCamelCase ( ) -> List[Any]:
def is_public(_lowercase ) -> bool:
return not name.startswith("""_""" )
UpperCAmelCase : int = {name for name in dir({} ) if is_public(_lowercase )}
UpperCAmelCase : Any = {name for name in dir(HashMap() ) if is_public(_lowercase )}
assert dict_public_names > hash_public_names
| 265 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ = {
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
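# Tokenizer and model classes are added to the lazy import structure only when their
# optional backends (sentencepiece, tokenizers, torch) are installed; otherwise they are skipped.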
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 78 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
a : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
a : List[str] = 2_5_0_0_0_4
a : List[str] = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = MBartTokenizer
lowercase = MBartTokenizerFast
lowercase = True
lowercase = True
def _lowercase( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : str = MBartTokenizer(A , keep_accents=A )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase( self ) -> int:
UpperCAmelCase : Optional[Any] = MBartTokenizer(A , keep_accents=A )
UpperCAmelCase : Tuple = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(
A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def _lowercase( self ) -> Union[str, Any]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase : Tuple = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : str = self.tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase : Dict = tokenizer_r.save_pretrained(A )
UpperCAmelCase : int = tokenizer_p.save_pretrained(A )
                # Checks it saves the same files, plus the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
UpperCAmelCase : int = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
UpperCAmelCase : Optional[int] = tokenizer_r.from_pretrained(A )
UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase : Any = tokenizer_r.save_pretrained(A , legacy_format=A )
UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(A )
                # Checks it saves the same files
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
UpperCAmelCase : List[str] = tokenizer_r.from_pretrained(A )
UpperCAmelCase : Any = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase : Optional[Any] = tempfile.mkdtemp()
UpperCAmelCase : Optional[Any] = tokenizer_r.save_pretrained(A , legacy_format=A )
UpperCAmelCase : List[str] = tokenizer_p.save_pretrained(A )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(A )
UpperCAmelCase : str = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( unittest.TestCase ):
lowercase = 'facebook/mbart-large-en-ro'
lowercase = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
lowercase = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
lowercase = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
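    # Reference ids for src_text[0] tokenized as en_XX: the sequence ends with EOS (2)
    # followed by the source language code.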
@classmethod
def _lowercase( cls ) -> Tuple:
UpperCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
UpperCAmelCase : int = 1
return cls
def _lowercase( self ) -> Union[str, Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A )
def _lowercase( self ) -> List[str]:
self.assertIn(A , self.tokenizer.all_special_ids )
UpperCAmelCase : str = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
UpperCAmelCase : int = self.tokenizer.decode(A , skip_special_tokens=A )
UpperCAmelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A )
self.assertEqual(A , A )
self.assertNotIn(self.tokenizer.eos_token , A )
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : List[str] = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , A )
UpperCAmelCase : int = 10
UpperCAmelCase : List[Any] = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , A )
self.assertEqual(len(A ) , A )
def _lowercase( self ) -> Tuple:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250026, 250001] )
def _lowercase( self ) -> Dict:
UpperCAmelCase : Any = tempfile.mkdtemp()
UpperCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A )
UpperCAmelCase : Tuple = MBartTokenizer.from_pretrained(A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A )
@require_torch
def _lowercase( self ) -> List[str]:
UpperCAmelCase : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors="""pt""" )
UpperCAmelCase : Union[str, Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
UpperCAmelCase : Optional[int] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(A , A )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
UpperCAmelCase : str = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , A )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Optional[Any] = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors="""pt""" )
UpperCAmelCase : Dict = self.tokenizer(
text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors="""pt""" )
UpperCAmelCase : Dict = targets["""input_ids"""]
UpperCAmelCase : Union[str, Any] = shift_tokens_right(A , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : List[Any] = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(A ) , {
# A, test, EOS, en_XX
"""input_ids""": [[62, 3034, 2, 250004]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250001,
} , )
| 265 | 0 |
'''simple docstring'''
def __lowercase ( __lowercase , __lowercase = False ) -> str:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
_A = F'''Expected string as input, found {type(__lowercase )}'''
raise ValueError(__lowercase )
if not isinstance(__lowercase , __lowercase ):
_A = F'''Expected boolean as use_pascal parameter, found {type(__lowercase )}'''
raise ValueError(__lowercase )
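    # Split on underscores; for camelCase keep the first word as-is and capitalize
    # the rest, for PascalCase capitalize every word.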
_A = input_str.split("_" )
_A = 0 if use_pascal else 1
_A = words[start_index:]
_A = [word[0].upper() + word[1:] for word in words_to_capitalize]
_A = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 79 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
a : List[str] = get_tests_dir("""fixtures""")
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> int:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase : Tuple = mock.Mock()
UpperCAmelCase : List[str] = 500
UpperCAmelCase : Any = {}
UpperCAmelCase : List[str] = HTTPError
UpperCAmelCase : str = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=A ) as mock_head:
UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
        # This checks that we did call the fake head request
mock_head.assert_called()
def _lowercase( self ) -> Any:
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def _lowercase( self ) -> Union[str, Any]:
with self.assertRaises(A ):
# config is in subfolder, the following should not work without specifying the subfolder
UpperCAmelCase : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
self.assertIsNotNone(A )
@is_staging_test
class UpperCamelCase_ ( unittest.TestCase ):
@classmethod
def _lowercase( cls ) -> Dict:
UpperCAmelCase : Tuple = TOKEN
HfFolder.save_token(A )
@classmethod
def _lowercase( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A , repo_id="""test-image-processor""" , push_to_hub=A , use_auth_token=self._token )
UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : List[str] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=A , use_auth_token=self._token )
UpperCAmelCase : int = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
def _lowercase( self ) -> Optional[int]:
CustomImageProcessor.register_for_auto_class()
UpperCAmelCase : Optional[Any] = CustomImageProcessor.from_pretrained(A )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(
f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=A )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
| 265 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase_ ( a__ , unittest.TestCase ):
__UpperCAmelCase = CLIPTokenizer
__UpperCAmelCase = CLIPTokenizerFast
__UpperCAmelCase = True
__UpperCAmelCase = {}
__UpperCAmelCase = False
def __a ( self ):
super().setUp()
# fmt: off
UpperCamelCase__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCamelCase__ = dict(zip(a , range(len(a ) ) ) )
UpperCamelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
UpperCamelCase__ = {"unk_token": "<unk>"}
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(a ) )
def __a ( self , **a ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **a )
def __a ( self , **a ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **a )
def __a ( self , a ):
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = "lower newer"
return input_text, output_text
def __a ( self ):
UpperCamelCase__ = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
UpperCamelCase__ = tokenizer.tokenize(a )
self.assertListEqual(a , a )
UpperCamelCase__ = tokens + [tokenizer.unk_token]
UpperCamelCase__ = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
@require_ftfy
def __a ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase__ = self.tokenizer_class.from_pretrained(a , **a )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a , **a )
UpperCamelCase__ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
UpperCamelCase__ = tokenizer_s.tokenize(a )
UpperCamelCase__ = tokenizer_r.tokenize(a )
self.assertListEqual(a , a )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCamelCase__ = "xa\u0303y" + " " + "x\xe3y"
UpperCamelCase__ = tokenizer_s.tokenize(a )
UpperCamelCase__ = tokenizer_r.tokenize(a )
self.assertListEqual(a , a )
# Test that the tokenization is identical on unicode of space type
UpperCamelCase__ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a )
UpperCamelCase__ = tokenizer_r.tokenize(a )
self.assertListEqual(a , a )
# Test that the tokenization is identical on unicode of line break type
UpperCamelCase__ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a )
UpperCamelCase__ = tokenizer_r.tokenize(a )
self.assertListEqual(a , a )
def __a ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase__ = f'''{text_of_1_token} {text_of_1_token}'''
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , )
UpperCamelCase__ = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , )
UpperCamelCase__ = f''' {text}'''
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , )
UpperCamelCase__ = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a ) + 1, 1 + len(a ) + 1 + len(a )) , )
def __a ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(a ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def __a ( self ):
super().test_tokenization_python_rust_equals()
def __a ( self ):
# CLIP always lower cases letters
pass
| 80 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def __lowerCamelCase ( _lowercase ) -> Tuple:
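    # Give every process a distinct, non-overlapping range of values so the results
    # of the collective operations below are easy to predict and verify.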
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def __lowerCamelCase ( _lowercase ) -> List[Any]:
UpperCAmelCase : Any = create_tensor(_lowercase )
UpperCAmelCase : Union[str, Any] = gather(_lowercase )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def __lowerCamelCase ( _lowercase ) -> Optional[int]:
UpperCAmelCase : Any = [state.process_index]
UpperCAmelCase : Union[str, Any] = gather_object(_lowercase )
assert len(_lowercase ) == state.num_processes, F'''{gathered_obj}, {len(_lowercase )} != {state.num_processes}'''
assert gathered_obj == list(range(state.num_processes ) ), F'''{gathered_obj} != {list(range(state.num_processes ) )}'''
def __lowerCamelCase ( _lowercase ) -> List[Any]:
UpperCAmelCase : Optional[int] = create_tensor(_lowercase )
UpperCAmelCase : List[str] = broadcast(_lowercase )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def __lowerCamelCase ( _lowercase ) -> Tuple:
# We need to pad the tensor with one more element if we are the main process
# to ensure that we can pad
if state.is_main_process:
UpperCAmelCase : Optional[Any] = torch.arange(state.num_processes + 1 ).to(state.device )
else:
UpperCAmelCase : Tuple = torch.arange(state.num_processes ).to(state.device )
UpperCAmelCase : Optional[Any] = pad_across_processes(_lowercase )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def __lowerCamelCase ( _lowercase ) -> Dict:
# For now runs on only two processes
if state.num_processes != 2:
return
UpperCAmelCase : Optional[Any] = create_tensor(_lowercase )
UpperCAmelCase : Optional[Any] = reduce(_lowercase , """sum""" )
UpperCAmelCase : Optional[Any] = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(_lowercase , _lowercase ), F'''{reduced_tensor} != {truth_tensor}'''
def __lowerCamelCase ( _lowercase ) -> Optional[Any]:
# For now runs on only two processes
if state.num_processes != 2:
return
UpperCAmelCase : Tuple = create_tensor(_lowercase )
UpperCAmelCase : Optional[int] = reduce(_lowercase , """mean""" )
UpperCAmelCase : str = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(_lowercase , _lowercase ), F'''{reduced_tensor} != {truth_tensor}'''
def __lowerCamelCase ( _lowercase ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
def __lowerCamelCase ( ) -> int:
UpperCAmelCase : List[Any] = PartialState()
state.print(F'''State: {state}''' )
state.print("""testing gather""" )
test_gather(_lowercase )
state.print("""testing gather_object""" )
test_gather_object(_lowercase )
state.print("""testing broadcast""" )
test_broadcast(_lowercase )
state.print("""testing pad_across_processes""" )
test_pad_across_processes(_lowercase )
state.print("""testing reduce_sum""" )
test_reduce_sum(_lowercase )
state.print("""testing reduce_mean""" )
test_reduce_mean(_lowercase )
if __name__ == "__main__":
main()
| 265 | 0 |