# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a formatter class under a format type name, plus optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register the error to raise when a format type's optional dependency is missing."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """Resolve a format type alias (e.g. "np") to its canonical name (e.g. "numpy")."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Instantiate the formatter registered for `format_type`, raising a helpful error if unavailable."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
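# A usage sketch (illustrative, not part of the module): `Dataset.set_format`
# ultimately resolves formatters through `get_formatter`, with aliases mapped to
# canonical names first.
#
#     from datasets.formatting import get_formatter
#     formatter = get_formatter("np")   # alias "np" resolves to "numpy"
#     # get_formatter("torch") re-raises the registered ValueError when PyTorch
#     # is not installed.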
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    """Build (old, new) rename pairs mapping timm parameter names to HF ViT-Hybrid names."""
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split timm's fused qkv projection into separate query/key/value entries."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
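# Illustration (not part of the conversion script) of the fused-qkv split used
# above: with hidden_size = 2, a (3 * hidden_size, hidden_size) fused weight
# yields query, key and value blocks in that row order.
#
#     import torch
#     hidden_size = 2
#     qkv = torch.arange(12.0).reshape(6, 2)
#     q = qkv[:hidden_size, :]                   # rows 0-1
#     k = qkv[hidden_size : hidden_size * 2, :]  # rows 2-3
#     v = qkv[-hidden_size:, :]                  # rows 4-5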
def remove_classification_head_(state_dict):
    """Drop timm's classification head weights from the state dict."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO cats image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak a timm ViT-Hybrid checkpoint into the HF ViT-Hybrid structure."""
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_r50_s16_384",
        type=str,
        help="Name of the hybrid ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
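# Example invocation (a sketch; the file name is assumed to match this script's
# name in the transformers repo, and running it downloads timm weights over the
# network):
#
#     python convert_vit_hybrid_timm_to_pytorch.py \
#         --vit_name vit_base_r50_s16_384 \
#         --pytorch_dump_folder_path ./vit-hybrid-base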
"""Bisection algorithms and binary search implementations for sorted lists."""
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locate the leftmost insertion point for `item` in `sorted_collection`."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locate the rightmost insertion point for `item` in `sorted_collection`."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert `item` at its leftmost position, keeping the collection sorted."""
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)
def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert `item` at its rightmost position, keeping the collection sorted."""
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)
def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Return the index of `item` in `sorted_collection`, or None if absent."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Binary search implemented via the standard library's bisect module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursive binary search over sorted_collection[left:right + 1]."""
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
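# Quick sanity check (illustrative): the three search variants agree on a small list.
#
#     >>> data = [0, 5, 7, 10, 15]
#     >>> binary_search(data, 15)
#     4
#     >>> binary_search_std_lib(data, 5)
#     1
#     >>> binary_search_by_recursion(data, 7, 0, len(data) - 1)
#     2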
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
"""Convert Table Transformer checkpoints (DETR architecture with a ResNet backbone) to the HuggingFace format."""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Copy/paste/tweak the original Table Transformer weights into the HuggingFace structure."""
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    file_name = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=file_name)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
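# Example invocation (a sketch; the file name is assumed to match this script's
# name in the transformers repo). The URL must be one of the two `choices` above:
#
#     python convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#         --pytorch_dump_folder_path ./table-transformer-detection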
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

    config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
    config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
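# Example invocation (a sketch; the script file name here is an assumption). The
# repo must contain config.json at its root or under unet/, plus
# diffusion_pytorch_model.bin when do_only_weights is enabled:
#
#     python change_naming_configs_and_checkpoints.py --repo_path ./my-unet --dump_path ./my-unet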
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )
    @require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
"num_train_timesteps": 40,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"num_train_timesteps": 151,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
def str2bool(v):
    """Parse a string as a boolean (argparse helper)."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    """Map one original ResNet block's parameters onto the diffusers naming scheme."""
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    """Split the original fused qkv attention parameters onto the diffusers naming scheme."""
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    """Convert an original consistency-models UNet checkpoint to the diffusers layout."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")

    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
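# Example invocation (a sketch; the file name is assumed to match this script's
# name in the diffusers repo). The configs are chosen from the checkpoint file
# name, so it must contain "imagenet64", "256" with "bedroom"/"cat", or "test":
#
#     python convert_consistency_to_diffusers.py \
#         --unet_path ./cd_imagenet64_l2.pt --dump_path ./cd-imagenet64 --class_cond True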
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCamelCase : int = self.get_scheduler_config()
_lowerCamelCase : List[Any] = scheduler_class(**_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
_lowerCamelCase : Optional[Any] = torch.manual_seed(0)
_lowerCamelCase : Tuple = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_lowerCamelCase : Tuple = sample.to(_UpperCamelCase)
for t in scheduler.timesteps:
_lowerCamelCase : List[Any] = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Any = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : List[Any] = output.prev_sample
_lowerCamelCase : Any = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : List[Any] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
"""simple docstring"""
_lowerCamelCase : List[str] = self.scheduler_classes[0]
_lowerCamelCase : Optional[int] = self.get_scheduler_config()
_lowerCamelCase : int = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase)
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase)
_lowerCamelCase : int = torch.manual_seed(0)
_lowerCamelCase : Tuple = self.dummy_model()
_lowerCamelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_lowerCamelCase : Optional[int] = sample.to(_UpperCamelCase)
for t in scheduler.timesteps:
_lowerCamelCase : Tuple = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : Any = model(_UpperCamelCase , _UpperCamelCase)
_lowerCamelCase : List[str] = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase)
_lowerCamelCase : int = output.prev_sample
_lowerCamelCase : Tuple = torch.sum(torch.abs(_UpperCamelCase))
_lowerCamelCase : List[str] = torch.mean(torch.abs(_UpperCamelCase))
assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
import inspect
import unittest
from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import argparse
import os
import re
import packaging.version
_lowerCAmelCase = """examples/"""
_lowerCAmelCase = {
"""examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
"""doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
_lowerCAmelCase = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
_lowerCAmelCase = """README.md"""
def lowercase ( _a ,_a ,_a ) -> List[Any]:
with open(_a ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
UpperCAmelCase_: List[str] = f.read()
UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = REPLACE_PATTERNS[pattern]
UpperCAmelCase_: List[Any] = replace.replace("VERSION" ,_a )
UpperCAmelCase_: str = re_pattern.sub(_a ,_a )
with open(_a ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.write(_a )
def lowercase ( _a ) -> List[str]:
for folder, directories, fnames in os.walk(_a ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(_a ,_a ) ,_a ,pattern="examples" )
def lowercase ( _a ,_a=False ) -> Optional[Any]:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_a ,_a ,_a )
if not patch:
update_version_in_examples(_a )
def lowercase ( ) -> List[str]:
UpperCAmelCase_: int = "🤗 Transformers currently provides the following architectures"
UpperCAmelCase_: Dict = "1. Want to contribute a new model?"
with open(_a ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
UpperCAmelCase_: Tuple = f.readlines()
# Find the start of the list.
UpperCAmelCase_: Optional[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
UpperCAmelCase_: Tuple = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
UpperCAmelCase_: str = lines[index].replace(
"https://huggingface.co/docs/transformers/main/model_doc" ,"https://huggingface.co/docs/transformers/model_doc" ,)
index += 1
with open(_a ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.writelines(_a )
def lowercase ( ) -> int:
with open(REPLACE_FILES["init"] ,"r" ) as f:
UpperCAmelCase_: List[str] = f.read()
UpperCAmelCase_: List[Any] = REPLACE_PATTERNS["init"][0].search(_a ).groups()[0]
return packaging.version.parse(_a )
def lowercase ( _a=False ) -> Optional[Any]:
UpperCAmelCase_: Optional[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
UpperCAmelCase_: int = default_version.base_version
elif patch:
UpperCAmelCase_: Tuple = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
UpperCAmelCase_: int = f"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
UpperCAmelCase_: Dict = input(f"Which version are you releasing? [{default_version}]" )
if len(_a ) == 0:
UpperCAmelCase_: Union[str, Any] = default_version
print(f"Updating version to {version}." )
global_version_update(_a ,patch=_a )
if not patch:
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
def lowercase ( ) -> Union[str, Any]:
UpperCAmelCase_: Any = get_version()
UpperCAmelCase_: List[Any] = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
UpperCAmelCase_: int = current_version.base_version
# Check with the user we got that right.
UpperCAmelCase_: Any = input(f"Which version are we developing now? [{dev_version}]" )
if len(_a ) == 0:
UpperCAmelCase_: str = dev_version
print(f"Updating version to {version}." )
global_version_update(_a )
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
_lowerCAmelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
"""Benchmark arguments for TensorFlow, including legacy handling of deprecated `no_*` flags."""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")

        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
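# A minimal usage sketch. Fields other than the ones defined above (e.g. `models`,
# `cuda`) are inherited from BenchmarkArguments and assumed here for illustration:
#
#   args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], eager_mode=True, use_xla=False)
#   with args.strategy.scope():
#       pass  # build and run the model being benchmarked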
"""Bitonic sort: a comparison network that requires the input length to be a power of two."""
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare the values at `index1` and `index2` and swap them if they are out
    of order for the given direction (1 = ascending, 0 = descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge the bitonic subsequence array[low:low + length] so that
    it becomes sorted in the given direction."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort array[low:low + length] in the given direction by first building a
    bitonic sequence (one ascending half, one descending half) and then merging it."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
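# Quick illustration (the slice length must be a power of two):
#   >>> example = [3, 2, 4, 1]
#   >>> bitonic_sort(example, 0, len(example), 1)
#   >>> example
#   [1, 2, 3, 4]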
if __name__ == "__main__":
_lowerCamelCase = input("""Enter numbers separated by a comma:\n""").strip()
_lowerCamelCase = [int(item.strip()) for item in user_input.split(""",""")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("""\nSorted array in ascending order is: """, end="""""")
print(*unsorted, sep=""", """)
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("""Sorted array in descending order is: """, end="""""")
print(*unsorted, sep=""", """)
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )

            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
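# A minimal usage sketch (assumes a local vocab.txt with one token per line, in
# the format used by the facebook/esm2 checkpoints):
#
#   tokenizer = EsmTokenizer(vocab_file="vocab.txt")
#   encoded = tokenizer("MKT")  # residues are matched via the no-split token trie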
def get_set_bits_count(number: int) -> int:
    """
    Count the number of set bits in a non-negative integer using Brian
    Kernighan's algorithm.

    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(0)
    0
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
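# Illustrative values (each loop iteration clears the lowest set bit):
#   get_set_bits_count(25)  # 0b11001  -> 3
#   get_set_bits_count(37)  # 0b100101 -> 3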
if __name__ == "__main__":
import doctest
doctest.testmod()
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    # We need a different implementation of the test of the same name defined in the common tests, because this
    # tokenizer has no vocab
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
def min_path_sum(grid: list) -> int:
    """
    Find the path from the top left to the bottom right of the grid with the
    lowest possible sum, and return that sum.

    >>> min_path_sum([
    ...     [1, 3, 1],
    ...     [1, 5, 1],
    ...     [4, 2, 1],
    ... ])
    7
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row
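# Illustrative example (note that the grid is modified in place):
#   min_path_sum([[1, 3, 1],
#                 [1, 5, 1],
#                 [4, 2, 1]])  # -> 7, following the path 1 -> 3 -> 1 -> 1 -> 1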
if __name__ == "__main__":
import doctest
doctest.testmod()
"""Newton-Raphson root finding, with a multiplicity factor for repeated roots."""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of the given function from `starting_point` using the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(F'''The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}''')
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
F'''{newton_raphson("log(y) - 1", 2, variable="y")}''',
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
F'''{newton_raphson("exp(x) - 1", 10, precision=0.0_05)}''',
)
# Find root of cos(x)
print(F'''The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}''')
"""Convert a ParlAI Blenderbot checkpoint to the Hugging Face format."""
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
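# Illustrative renames produced by rename_state_dict_key (the keys below are
# examples for orientation, not taken from a specific checkpoint):
#   "encoder.layers.0.attention.q_lin.weight" -> "encoder.layers.0.self_attn.q_proj.weight"
#   "decoder.layers.0.norm2.bias"             -> "decoder.layers.0.encoder_attn_layer_norm.bias"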
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """
    Copy/paste/tweak the ParlAI weights into the Hugging Face structure.
    """
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
UpperCamelCase_ : Optional[int] = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
"""Rescale data with min-max normalization or z-score standardization."""
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale the values in `data` to the range [0, 1] (min-max normalization)."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Rescale the values in `data` to have mean 0 and standard deviation 1 (z-score)."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
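# Illustrative usage:
#   normalization([2, 7, 10, 20, 30, 50])    # -> [0.0, 0.104, 0.167, 0.375, 0.583, 1.0]
#   standardization([2, 7, 10, 20, 30, 50])  # same data rescaled to mean 0, stdev 1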
"""Tests for serializing `datasets` SplitDict objects."""
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # the "dataset_name" attribute should be part of the dict returned by asdict
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
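# A minimal usage sketch (the defaults above correspond to the 24 kHz setup):
#
#   config = EncodecConfig()
#   config.frame_rate  # ceil(24000 / prod([8, 5, 4, 2])) = 75 frames per second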
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
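# Usage sketch (illustrative; the OnnxConfig constructor call below is an
# assumption based on the base class, not something shown in this file):
#
#   config = MobileNetV1Config(depth_multiplier=0.75)
#   onnx_config = MobileNetV1OnnxConfig(config, task="image-classification")
#   onnx_config.inputs              # OrderedDict([("pixel_values", {0: "batch"})])
#   onnx_config.outputs             # OrderedDict([("logits", {0: "batch"})])
#   onnx_config.atol_for_validation # 1e-4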
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet.

        `dtype` can be either `uint8` to leave the input as `[0, 255]`, or
        `float32` to rescale into `[0, 1]`. The seed argument provides
        convenient deterministic testing.
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from `source_url`, unless it is already present."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
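# Minimal usage sketch (kept as comments so the module stays import-safe).
# With fake_data=True nothing is downloaded, so this can be tried offline:
#
#   mnist = read_data_sets("/tmp/mnist", fake_data=True, one_hot=True)
#   images, labels = mnist.train.next_batch(32, fake_data=True)
#   len(images)     # 32 fake images, each a flat list of 784 ones
#   len(labels[0])  # 10 one-hot entries per fake label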
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args):
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image

    from transformers.image_utils import load_image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
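# Illustrative values for the two branches above (written out by hand, not
# executed; instantiating the OnnxConfig directly like this is an assumption):
#
#   CamembertOnnxConfig(CamembertConfig(), task="multiple-choice").inputs
#   # OrderedDict([("input_ids", {0: "batch", 1: "choice", 2: "sequence"}),
#   #              ("attention_mask", {0: "batch", 1: "choice", 2: "sequence"})])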
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images built from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table):
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
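# Usage sketch: this builder is normally reached through the generic
# `datasets.load_dataset` entry point rather than instantiated directly.
# The file path and column names below are placeholders:
#
#   import datasets
#   ds = datasets.load_dataset(
#       "parquet",
#       data_files={"train": "data/train.parquet"},
#       columns=["id", "text"],  # forwarded to ParquetConfig.columns
#   )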
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum possible sum of a (not necessarily contiguous) subsequence.

    >>> max_subsequence_sum([1, 2, 3, 4, -2])
    10
    >>> max_subsequence_sum([-2, -3, -1, -4, -6])
    -1
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
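# Worked example (added for clarity): for nums = [2, -5, 3] the loop runs as
#   ans = 2
#   num = -5 -> ans = max(2, 2 + -5, -5) = 2
#   num =  3 -> ans = max(2, 2 + 3,   3) = 5
# i.e. it keeps the non-contiguous pair 2 + 3, which is why this computes the
# maximum *subsequence* sum rather than the maximum contiguous-subarray sum.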
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
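# Behavior sketch: with the _LazyModule indirection above, config symbols import
# eagerly while torch-dependent model classes resolve on first attribute access
# (the package path below is an assumption about where this __init__ lives):
#
#   from transformers import PegasusXConfig  # always importable
#   from transformers import PegasusXModel   # resolved lazily; fails only if torch is missing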
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: Optional[str] = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )
class MaskGenerator:
    """Generates a random boolean mask over the patch grid for masked image modeling."""

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
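# Quick illustration of the arithmetic above (comments only): with the defaults,
# input_size=192 and mask_patch_size=32 give a 6x6 grid of maskable patches
# (rand_size=6, token_count=36), and mask_ratio=0.6 masks ceil(36 * 0.6) = 22
# of them. scale = 32 // 4 = 8, so each call returns a flat 0/1 tensor of
# length (6 * 8) ** 2 = 2304 aligned to the model patch grid:
#
#   mask = MaskGenerator()()  # torch.Tensor of shape (2304,)
#   int(mask.sum())           # 22 * 8 ** 2 = 1408 masked model patches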
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying transforms + creating a corresponding mask,
        indicating which patches to mask."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
# Training
if training_args.do_train:
_snake_case : Dict = None
if training_args.resume_from_checkpoint is not None:
_snake_case : List[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_snake_case : Optional[int] = last_checkpoint
_snake_case : Optional[Any] = trainer.train(resume_from_checkpoint=UpperCAmelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_snake_case : Optional[Any] = trainer.evaluate()
trainer.log_metrics("eval" , UpperCAmelCase )
trainer.save_metrics("eval" , UpperCAmelCase )
# Write model card and (optionally) push to hub
_snake_case : Optional[Any] = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "masked-image-modeling",
"dataset": data_args.dataset_name,
"tags": ["masked-image-modeling"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCAmelCase )
else:
trainer.create_model_card(**UpperCAmelCase )
if __name__ == "__main__":
main()
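
# --- Illustration (not part of the original script) ---------------------------
# A minimal sketch of what a SimMIM-style `MaskGenerator` can look like, to
# clarify the `mask_generator()` calls above. The real class is defined earlier
# in this script; the names and details below are assumptions for illustration.
import numpy as np


class SimpleMaskGenerator:
    """Generates a random patch mask on a coarse grid, then upsamples it to the
    model's (finer) patch grid, as in the SimMIM paper."""

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        if input_size % mask_patch_size != 0 or mask_patch_size % model_patch_size != 0:
            raise ValueError("Input size and patch sizes must divide evenly.")
        self.rand_size = input_size // mask_patch_size
        self.scale = mask_patch_size // model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * mask_ratio))

    def __call__(self):
        # pick `mask_count` random coarse cells to mask ...
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1
        # ... then repeat each cell `scale` times along both axes so the mask
        # lines up with the model patch grid
        mask = mask.reshape((self.rand_size, self.rand_size))
        return mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)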
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
class __snake_case ( _lowercase):
snake_case__ : Tuple = "timm_backbone"
def __init__( self : Union[str, Any] , __lowerCAmelCase : Any=None , __lowerCAmelCase : Optional[Any]=3 , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[str]=None , **__lowerCAmelCase : str , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = backbone
_lowerCamelCase : Dict = num_channels
_lowerCamelCase : Optional[int] = features_only
_lowerCamelCase : List[Any] = use_pretrained_backbone
_lowerCamelCase : int = True
_lowerCamelCase : List[str] = out_indices if out_indices is not None else (-1,)
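
# --- Illustration (not part of the original module) ---------------------------
# A minimal usage sketch for the config above; the backbone name is an
# assumption chosen for illustration.
#
#   from transformers import TimmBackbone
#
#   config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
#   model = TimmBackbone(config)  # yields feature maps from the requested stages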
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)

        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not"
                    " exist, no preloaded speaker embeddings will be used - Make sure to provide a correct path to"
                    " the json dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub: bool = False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}`"
                    f" does not exist, no preloaded voice preset will be used - Make sure to provide correct paths"
                    f" to the {voice_preset} embeddings."
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None, **kwargs):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
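
# --- Illustration (not part of the original module) ---------------------------
# A minimal usage sketch, assuming a checkpoint such as "suno/bark-small" that
# ships the speaker-embeddings json expected by `from_pretrained` above.
#
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#   # `inputs` now holds the tokenized text plus a "history_prompt" BatchFeature
#   # that a Bark model can condition on.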
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
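
# --- Illustration (not part of the original tests) ----------------------------
# The round-trip pattern the first test exercises, as a user would write it:
#
#   gen_config = GenerationConfig(do_sample=True, temperature=0.7)
#   gen_config.save_pretrained("my_model_dir")
#   gen_config = GenerationConfig.from_pretrained("my_model_dir")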
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with the given input with regard to the model."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} has the wrong format; it should be of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records


@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length // 2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to, YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
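
# --- Illustration (not part of the original module) ---------------------------
# A minimal usage sketch of the pipelines above, assuming the `pipeline`
# factory and a seq2seq checkpoint such as "t5-small":
#
#   from transformers import pipeline
#
#   summarizer = pipeline("summarization", model="t5-small")
#   print(summarizer("A long article ...", max_length=20))
#
#   translator = pipeline("translation_en_to_fr", model="t5-small")
#   print(translator("How old are you?"))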
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
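
# --- Illustration (not part of the original tests) ----------------------------
# The arithmetic the first test asserts, spelled out with plain numpy:
# accumulated gradients are element-wise sums over the accumulation steps.
import numpy as np

grads = [np.array([1.0, 2.0]), np.array([-2.0, 1.0]), np.array([-1.0, 2.0])]
assert np.allclose(np.sum(grads, axis=0), [-2.0, 5.0])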
from __future__ import annotations

from random import choice


def random_pivot(lst):
    """
    Choose a random pivot for the list.
    A more sophisticated strategy, such as median-of-medians, could be used here.
    """
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """
    Return the kth smallest number in lst.
    >>> kth_number([2, 1, 3, 4, 5], 3)
    3
    """
    # pick a pivot and partition around it in linear time
    pivot = random_pivot(lst)
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than the pivot)
    # + pivot (kth element)
    # + big (elements larger than the pivot)
    if len(small) == k - 1:
        return pivot
    # the kth element is among the elements bigger than the pivot
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # the kth element is among the elements smaller than the pivot
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
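
# --- Illustration (not part of the original module) ---------------------------
# Quickselect runs in O(n) expected time because each call recurses into only
# one side of the partition. Note the partition above drops duplicates of the
# pivot, so distinct elements are assumed. A sanity check against sorting:
#
#   import random
#   data = random.sample(range(1000), 50)  # 50 distinct values
#   assert kth_number(data, 7) == sorted(data)[6]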
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number - 1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
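
# --- Illustration (not part of the original module) ---------------------------
# A hypothetical call site for `deprecate`: warn about (and consume) a renamed
# keyword argument. The function and kwarg names are invented for illustration,
# and `deprecate` must live inside the package for its relative import to work.
#
#   def resize(image, **kwargs):
#       size = deprecate("size", "0.30.0", "Use `shape` instead.", take_from=kwargs)
#       ...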
"""
Project Euler Problem 29: https://projecteuler.net/problem=29

How many distinct terms are in the sequence generated by a**b
for 2 <= a <= 100 and 2 <= b <= 100?
"""


def solution(n: int = 100) -> int:
    """Return the number of distinct terms a**b for 2 <= a, b <= n."""
    collect_powers = set()

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)


if __name__ == "__main__":
    print("Number of terms ", solution(int(str(input()).strip())))
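
# --- Illustration (not part of the original module) ---------------------------
# Worked example from the problem statement: for n = 5 the 16 terms a**b with
# 2 <= a, b <= 5 contain one collision (2**4 == 4**2 == 16), leaving 15
# distinct terms, so solution(5) == 15.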
import csv

import tweepy

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
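
# --- Illustration (not part of the original script) ---------------------------
# The pagination pattern above in isolation: keep requesting pages keyed on
# `max_id` (oldest id seen minus one) until a request comes back empty. The
# `fetch` callable is a stand-in invented for illustration.
#
#   def paginate(fetch, page_size=200):
#       page = fetch(count=page_size)
#       while page:
#           yield from page
#           page = fetch(count=page_size, max_id=page[-1].id - 1)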
"""simple docstring"""
def lowercase_ ( __UpperCAmelCase ) -> list:
lowerCAmelCase__ : List[Any] = len(__UpperCAmelCase )
for i in range(1 , __UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = collection[i]
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : List[str] = i - 1
while low <= high:
lowerCAmelCase__ : str = (low + high) // 2
if val < collection[mid]:
lowerCAmelCase__ : List[Any] = mid - 1
else:
lowerCAmelCase__ : Optional[int] = mid + 1
for j in range(__UpperCAmelCase , __UpperCAmelCase , -1 ):
lowerCAmelCase__ : Dict = collection[j - 1]
lowerCAmelCase__ : Union[str, Any] = val
return collection
if __name__ == "__main__":
_A = input("""Enter numbers separated by a comma:\n""").strip()
_A = [int(item) for item in user_input.split(""",""")]
print(binary_insertion_sort(unsorted))
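
# --- Illustration (not part of the original module) ---------------------------
# The binary search cuts comparisons per insertion to O(log i), but the shift
# loop keeps the overall worst case at O(n^2) element moves.
#
#   assert binary_insertion_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]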
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 13 , SCREAMING_SNAKE_CASE_ = 64 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 3 , SCREAMING_SNAKE_CASE_ = 3 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1_28 , SCREAMING_SNAKE_CASE_=[16, 32, 64, 1_28] , SCREAMING_SNAKE_CASE_ = 7 , SCREAMING_SNAKE_CASE_ = 4 , SCREAMING_SNAKE_CASE_ = 37 , SCREAMING_SNAKE_CASE_ = "gelu" , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = 0.02 , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 1_28 , SCREAMING_SNAKE_CASE_ = [2, 2, 2, 2] , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 2 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = encoder_stride
SCREAMING_SNAKE_CASE_ = num_attention_outputs
SCREAMING_SNAKE_CASE_ = embed_dim
SCREAMING_SNAKE_CASE_ = embed_dim + 1
SCREAMING_SNAKE_CASE_ = resolution
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = hidden_sizes
SCREAMING_SNAKE_CASE_ = dim
SCREAMING_SNAKE_CASE_ = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            resolution=self.resolution,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            dim=self.dim,
            mlp_expansion_ratio=self.mlp_expansion_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)

        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )

    def test_compile_tf_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)

            self.assertTrue(outputs_dict is not None)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
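
# --- Illustration (not part of the original tests) ----------------------------
# The integration tests above reduce to this usage pattern (checkpoint name
# taken from the tests themselves):
#
#   image_processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
#   model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
#   logits = model(**image_processor(images=image, return_tensors="tf")).logits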
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
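
# --- Illustration (not part of the original tests) ----------------------------
# The "@,@", "@.@" and "@-@" markers in the moses test above are WikiText
# conventions: numbers and hyphenated words are split on punctuation, and the
# placeholder tokens let `convert_tokens_to_string` re-join them losslessly.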
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)


def skip(test_case):
    "Decorator that skips a test unconditionally"
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    "Decorator marking a test as slow; run it by setting RUN_SLOW to a truthy value"
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    # torch >= 1.12 is the FSDP requirement
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """
    A TestCase class that keeps a single `tempfile.TemporaryDirectory` open for the full test suite, wiping its
    contents between tests.
    """

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        "Creates a `tempfile.TemporaryDirectory` and stores it in `cls.tmpdir`"
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        "Remove `cls.tmpdir` after the test suite has finished"
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        "Destroy all contents in `self.tmpdir`, but not `self.tmpdir` itself"
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    "A TestCase that resets the `AcceleratorState` singleton between tests."

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    "A TestCase that can register mocks to be started on every test and stopped automatically."

    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}")
    return result
class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    '''simple docstring'''
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}") from e
| 389
|
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    """simple docstring"""
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=125, additional_special_tokens=None, **kwargs, ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f'<extra_id_{i}>' for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens")
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs, )
        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits
        # define special tokens dict
        self.special_tokens_encoder = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def _add_eos_if_not_present(self, token_ids: List[int]):
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
                " eos tokens being added.")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def _tokenize(self, text: str):
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens
    def _convert_token_to_id(self, token):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id
    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token
    def convert_tokens_to_string(self, tokens):
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # ByT5 has no vocabulary file to save.
        return ()
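# Round-trip sketch (added for illustration; constructing the tokenizer directly is an
# assumption, since ByT5 needs no vocabulary file). Each UTF-8 byte maps to one token,
# offset by the three special tokens (<pad>, </s>, <unk>).
if __name__ == "__main__":
    tok = ByT5Tokenizer()
    ids = tok("hi")["input_ids"]  # one id per byte, plus the trailing </s>
    print(ids)                    # [107, 108, 1]: ord("h") + 3, ord("i") + 3, eos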
| 388
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 702
|
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
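# Quick check (added for illustration): the byte-to-unicode table is a bijection over
# all 256 byte values, and `get_pairs` enumerates adjacent symbol pairs of a word.
assert len(bytes_to_unicode()) == 2**8
assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}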
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs, ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)
    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8"))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index)
    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
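# Usage sketch (added for illustration; loading the "allenai/led-base-16384" checkpoint
# named above needs a network/cached download, so this is left commented). LED keeps an
# extra `global_attention_mask` aligned with `input_ids`; padding fills it with -1
# ("local attention") rather than 0.
# tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
# enc = tokenizer("a long document ...")
# enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)  # global on <s>
# padded = tokenizer.pad(enc, padding="max_length", max_length=32)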
| 189
| 0
|
def triangle_number_generator():
    """simple docstring"""
    for n in range(1, 1_0_0_0_0_0_0):
        yield n * (n + 1) // 2
def count_divisors(n) -> int:
    """simple docstring"""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
def solution() -> int:
    """simple docstring"""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 5_0_0)
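# Sanity check (added for illustration, mirroring the Project Euler 12 statement):
# 28 is the first triangle number with more than five divisors (1, 2, 4, 7, 14, 28).
assert count_divisors(28) == 6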
if __name__ == "__main__":
print(solution())
| 246
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """simple docstring"""
    if (force, area, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if force < 0:
        raise ValueError('Magnitude of force can not be negative')
    if distance < 0:
        raise ValueError('Distance can not be negative')
    if area < 0:
        raise ValueError('Area can not be negative')
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            2_4_0 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (2_4_0 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('One and only one argument must be 0')
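# Worked example (added for illustration): for plates of area 4 m^2 separated by
# 1e-6 m, F = (pi^2 * hbar * c * A) / (240 * d^4) comes out to roughly 5.2e-3 N.
assert abs(casimir_force(force=0, area=4, distance=1e-6)["force"] - 5.2041e-3) < 1e-5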
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 246
| 1
|
"""simple docstring"""
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    '''simple docstring'''
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
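# Quick check (added for illustration): trial division lists each prime once per
# multiplicity, in non-decreasing order, e.g. 360 = 2^3 * 3^2 * 5.
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]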
if __name__ == "__main__":
import doctest
doctest.testmod()
| 256
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__UpperCAmelCase = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    '''simple docstring'''
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"""Could not make batched video from {videos}""")
# NOTE: the original class name was obfuscated in this dump; "VideoImageProcessor" is
# an assumed placeholder name.
class VideoImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs, ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image, mean, std, data_format=None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, data_format=ChannelDimension.FIRST, ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
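# Usage sketch (added for illustration; runs offline on random frames; the class name is
# the assumed placeholder from above). A "video" is a list of frames; the processor
# resizes, crops, rescales and normalizes each frame and stacks them under
# "pixel_values" with shape (batch, frames, channels, height, width).
if __name__ == "__main__":
    frames = [np.random.randint(0, 256, (240, 320, 3), dtype=np.uint8) for _ in range(8)]
    processor = VideoImageProcessor()
    batch = processor(frames, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224)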
| 256
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=24_65_34, n_positions=2_56, n_embd=12_80, dff=81_92, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
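# Usage sketch (added for illustration): `attribute_map` lets framework-generic names
# resolve to CTRL-specific ones, so `hidden_size` transparently reads `n_embd`.
if __name__ == "__main__":
    config = CTRLConfig(n_embd=512, n_layer=6)  # a small, hypothetical variant
    print(config.hidden_size, config.num_hidden_layers)  # 512 6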
| 382
|
from typing import Any
import numpy as np
def is_hermitian(matrix) -> bool:
    """simple docstring"""
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a, v) -> Any:
    """simple docstring"""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def tests() -> None:
    """simple docstring"""
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f'''{a} is not hermitian.'''
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f'''{a} is not hermitian.'''
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    tests()
| 662
| 0
|
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
'''A''': '''N''',
'''N''': '''A''',
'''B''': '''O''',
'''O''': '''B''',
'''C''': '''P''',
'''P''': '''C''',
'''D''': '''Q''',
'''Q''': '''D''',
'''E''': '''R''',
'''R''': '''E''',
'''F''': '''S''',
'''S''': '''F''',
'''G''': '''T''',
'''T''': '''G''',
'''H''': '''U''',
'''U''': '''H''',
'''I''': '''V''',
'''V''': '''I''',
'''J''': '''W''',
'''W''': '''J''',
'''K''': '''X''',
'''X''': '''K''',
'''L''': '''Y''',
'''Y''': '''L''',
'''M''': '''Z''',
'''Z''': '''M''',
}
# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str):
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f'''Please use 3 unique rotors (not {unique_rotsel})'''
        raise Exception(msg)
    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f'''First rotor position is not within range of 1..26 ({rotorpos1})'''
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f'''Second rotor position is not within range of 1..26 ({rotorpos2})'''
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f'''Third rotor position is not within range of 1..26 ({rotorpos3})'''
        raise ValueError(msg)
    # Validates string and returns dict
    pbdict = _plugboard(pb)
    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str):
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f'''Plugboard setting isn't type string ({type(pbstring)})'''
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f'''Odd number of symbols ({len(pbstring)})'''
        raise Exception(msg)
    elif pbstring == "":
        return {}
    pbstring.replace(" ", "")
    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f''''{i}' not in list of symbols'''
            raise Exception(msg)
        elif i in tmppbl:
            msg = f'''Duplicate symbol ({i})'''
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl
    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
def enigma(text: str, rotor_position: RotorPositionT, rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3), plugb: str = "", ):
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper())
    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1
    result = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]
            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]
            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]
            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]
            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0
        # else:
        #    pass
        #    Error could be also raised
        #    raise ValueError(
        #        'Invalid symbol('+repr(symbol)+')')
        result.append(symbol)
    return "".join(result)
if __name__ == "__main__":
lowercase : List[Any] = '''This is my Python script that emulates the Enigma machine from WWII.'''
lowercase : str = (1, 1, 1)
lowercase : Union[str, Any] = '''pictures'''
lowercase : List[Any] = (rotora, rotora, rotora)
lowercase : str = enigma(message, rotor_pos, rotor_sel, pb)
print('''Encrypted message:''', en)
print('''Decrypted message:''', enigma(en, rotor_pos, rotor_sel, pb))
| 704
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    '''simple docstring'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs, ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
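# Usage sketch (added for illustration; loading "dandelin/vilt-b32-finetuned-vqa" needs
# a cached download, so this is left commented). The processor fans text out to the
# tokenizer and images out to the image processor, then merges both encodings.
# processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
# enc = processor(images=image, text="How many cats are there?", return_tensors="pt")
# enc.keys()  # input_ids/attention_mask from the tokenizer, pixel_values/pixel_mask from images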
| 114
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }
    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
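# Usage sketch (added for illustration): TrOCR's config describes only the text decoder,
# and generic names map onto decoder-specific ones via `attribute_map`.
if __name__ == "__main__":
    config = TrOCRConfig(d_model=256, decoder_layers=2)  # a small, hypothetical variant
    print(config.hidden_size, config.num_hidden_layers)  # 256 2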
| 581
|
'''simple docstring'''
def solution(n=60_08_51_47_51_43) -> int:
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
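# Quick check (added for illustration, from the Project Euler 3 statement):
# the largest prime factor of 13195 is 29.
assert solution(13195) == 29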
if __name__ == "__main__":
print(F'''{solution() = }''')
| 245
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
    def test_check_illegal_input(self):
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here
    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)
        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])
    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])
        dc.reset()
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 718
|
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2000.0, d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64, d_ff: int = 2048, dropout_rate: float = 0.1, ):
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(), )
        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time, ).to(dtype=self.dtype)
        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length), )
        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
        for lyr in self.decoders:
            y = lyr(
                y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask, )[0]
        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate))
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon, ))
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon))
    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, ):
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask, )
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype)
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask, )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, ):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class TaLayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, hidden_states, key_value_states=None, attention_mask=None, ):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1), )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class _snake_case ( nn.Module ):
def __init__( self : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] ):
super().__init__()
__lowerCamelCase : List[Any] = nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
__lowerCamelCase : List[Any] = nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
__lowerCamelCase : List[Any] = nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
__lowerCamelCase : str = nn.Dropout(UpperCAmelCase )
__lowerCamelCase : Optional[Any] = NewGELUActivation()
def lowerCamelCase__ ( self : Dict , UpperCAmelCase : List[Any] ):
__lowerCamelCase : Union[str, Any] = self.act(self.wi_a(UpperCAmelCase ) )
__lowerCamelCase : Any = self.wi_a(UpperCAmelCase )
__lowerCamelCase : Optional[Any] = hidden_gelu * hidden_linear
__lowerCamelCase : Any = self.dropout(UpperCAmelCase )
__lowerCamelCase : List[Any] = self.wo(UpperCAmelCase )
return hidden_states
class _snake_case ( nn.Module ):
def __init__( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : List[str]=1E-6 ):
super().__init__()
__lowerCamelCase : List[Any] = nn.Parameter(torch.ones(UpperCAmelCase ) )
__lowerCamelCase : Tuple = eps
def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : Any ):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
__lowerCamelCase : int = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=UpperCAmelCase )
__lowerCamelCase : Dict = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
__lowerCamelCase : Union[str, Any] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class _snake_case ( nn.Module ):
def lowerCamelCase__ ( self : str , UpperCAmelCase : torch.Tensor ):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(UpperCAmelCase , 3.0 )) ))
class _snake_case ( nn.Module ):
def __init__( self : Tuple , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] ):
super().__init__()
__lowerCamelCase : Any = nn.Linear(UpperCAmelCase , out_features * 2 , bias=UpperCAmelCase )
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any ):
__lowerCamelCase : Optional[Any] = self.scale_bias(UpperCAmelCase )
__lowerCamelCase , __lowerCamelCase : Dict = torch.chunk(UpperCAmelCase , 2 , -1 )
__lowerCamelCase : List[Any] = x * (1 + scale) + shift
return x
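# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): the two conditioning
# primitives used above, in isolation. FiLM modulates features as
# x * (1 + scale) + shift, and the T5-style norm rescales by the root mean
# square of the features. All names below are illustrative.
import torch


def film(x: torch.Tensor, scale: torch.Tensor, shift: torch.Tensor) -> torch.Tensor:
    # Identity mapping when scale == shift == 0.
    return x * (1 + scale) + shift


def rms_norm(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    # Mean of squares (no mean subtraction, no bias), accumulated in fp32.
    variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
    return weight * (x * torch.rsqrt(variance + eps)).to(x.dtype)


# rms_norm(torch.full((4,), 3.0), torch.ones(4)) -> ~[1., 1., 1., 1.]
# ---------------------------------------------------------------------------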
| 366
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 363
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
'''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Swinv2ForImageClassification''',
'''Swinv2ForMaskedImageModeling''',
'''Swinv2Model''',
'''Swinv2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
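# Editor's note: `_LazyModule` defers the heavy torch imports above. The keys of
# `_import_structure` name submodules and the values list the public names they
# export, so `from transformers.models.swinv2 import Swinv2Model` only loads
# `modeling_swinv2` on first access.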
| 450
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowercase = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
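# Worked example (editor's note): downscale_height_and_width(768, 768, 8) == (96, 96),
# since 768 // 8**2 == 12 exactly and 12 * 8 == 96; a non-multiple such as 500 is
# rounded up: 500 // 64 == 7 -> 8, giving 8 * 8 == 64.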
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
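# Editor's note: prepare_image maps 8-bit RGB values into [-1, 1]
# (0 -> -1.0, 127.5 -> 0.0, 255 -> 1.0) and returns a (1, 3, h, w) float tensor
# in channels-first layout, which is what the MoVQ encoder consumes.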
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler, movq):
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
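    # Worked example (editor's note): with num_inference_steps=100 and strength=0.2,
    # init_timestep = min(int(100 * 0.2), 100) = 20 and t_start = 80, so only the
    # last 20 scheduler timesteps are run; higher strength reuses less of the
    # original image.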
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=None ):
"""simple docstring"""
if not isinstance(_lowercase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowercase )}' )
_lowerCAmelCase = image.to(device=_lowercase , dtype=_lowercase )
_lowerCAmelCase = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCAmelCase = image
else:
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(_lowercase )}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
elif isinstance(_lowercase , _lowercase ):
_lowerCAmelCase = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowercase )
]
_lowerCAmelCase = torch.cat(_lowercase , dim=0 )
else:
_lowerCAmelCase = self.movq.encode(_lowercase ).latent_dist.sample(_lowercase )
_lowerCAmelCase = self.movq.config.scaling_factor * init_latents
_lowerCAmelCase = torch.cat([init_latents] , dim=0 )
_lowerCAmelCase = init_latents.shape
_lowerCAmelCase = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase )
# get latents
_lowerCAmelCase = self.scheduler.add_noise(_lowercase , _lowercase , _lowercase )
_lowerCAmelCase = init_latents
return latents
def _lowercase ( self , _lowercase=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_lowerCAmelCase = torch.device(F'cuda:{gpu_id}' )
_lowerCAmelCase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
def _lowercase ( self , _lowercase=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_lowerCAmelCase = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=_lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCAmelCase = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCAmelCase , _lowerCAmelCase = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase )
# We'll offload the last model manually.
_lowerCAmelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase ( self ):
"""simple docstring"""
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowercase )
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase = 512 , _lowercase = 512 , _lowercase = 100 , _lowercase = 4.0 , _lowercase = 0.3 , _lowercase = 1 , _lowercase = None , _lowercase = "pil" , _lowercase = True , ):
"""simple docstring"""
_lowerCAmelCase = self._execution_device
_lowerCAmelCase = guidance_scale > 1.0
if isinstance(_lowercase , _lowercase ):
_lowerCAmelCase = torch.cat(_lowercase , dim=0 )
_lowerCAmelCase = image_embeds.shape[0]
if isinstance(_lowercase , _lowercase ):
_lowerCAmelCase = torch.cat(_lowercase , dim=0 )
if do_classifier_free_guidance:
_lowerCAmelCase = image_embeds.repeat_interleave(_lowercase , dim=0 )
_lowerCAmelCase = negative_image_embeds.repeat_interleave(_lowercase , dim=0 )
_lowerCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase )
if not isinstance(_lowercase , _lowercase ):
_lowerCAmelCase = [image]
if not all(isinstance(_lowercase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F'Input is in incorrect format: {[type(_lowercase ) for i in image]}. Currently, we only support PIL image and pytorch tensor' )
_lowerCAmelCase = torch.cat([prepare_image(_lowercase , _lowercase , _lowercase ) for i in image] , dim=0 )
_lowerCAmelCase = image.to(dtype=image_embeds.dtype , device=_lowercase )
_lowerCAmelCase = self.movq.encode(_lowercase )["""latents"""]
_lowerCAmelCase = latents.repeat_interleave(_lowercase , dim=0 )
self.scheduler.set_timesteps(_lowercase , device=_lowercase )
_lowerCAmelCase , _lowerCAmelCase = self.get_timesteps(_lowercase , _lowercase , _lowercase )
_lowerCAmelCase = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCAmelCase , _lowerCAmelCase = downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor )
_lowerCAmelCase = self.prepare_latents(
_lowercase , _lowercase , _lowercase , _lowercase , image_embeds.dtype , _lowercase , _lowercase )
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase = {"""image_embeds""": image_embeds}
_lowerCAmelCase = self.unet(
sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0]
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
_lowerCAmelCase , _lowerCAmelCase = noise_pred.chunk(2 )
_lowerCAmelCase , _lowerCAmelCase = variance_pred.chunk(2 )
_lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCAmelCase , _lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase = self.scheduler.step(
_lowercase , _lowercase , _lowercase , generator=_lowercase , )[0]
# post-processing
_lowerCAmelCase = self.movq.decode(_lowercase , force_not_quantize=_lowercase )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
_lowerCAmelCase = image * 0.5 + 0.5
_lowerCAmelCase = image.clamp(0 , 1 )
_lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
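# ---------------------------------------------------------------------------
# Editor's sketch (not part of the pipeline): the classifier-free guidance
# update used in the denoising loop above, in isolation. Tensors are stand-ins.
import torch


def apply_cfg(noise_pred_uncond: torch.Tensor, noise_pred_text: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # guidance_scale == 1.0 reduces to the conditional prediction.
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)


# With the __call__ default guidance_scale=4.0, the prediction is pushed four
# times further along the direction suggested by the image conditioning.
# ---------------------------------------------------------------------------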
| 716
|
'''simple docstring'''
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """
    Implementation of pigeonhole sort.
    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> pigeon_sort([])
    []
    >>> pigeon_sort([-2, -5, -45])
    [-45, -5, -2]
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Count each value into its hole.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Write the values back into the array in order.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array
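# Example (editor's note): pigeon_sort([8, 3, 2, 7, 4]) -> [2, 3, 4, 7, 8].
# With _min=2 and _max=8 there are 7 holes; each value v is counted at index
# v - _min and written back in index order, so the sort runs in O(n + range).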
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase = input("""Enter numbers separated by comma:\n""")
_lowercase = [int(x) for x in user_input.split(""",""")]
print(pigeon_sort(unsorted))
| 162
| 0
|
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
def __init__(self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=3_0 , lowerCamelCase__=2 , lowerCamelCase__=3 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=3_2 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=1_0 , lowerCamelCase__=0.0_2 , lowerCamelCase__=3 , lowerCamelCase__=0.6 , lowerCamelCase__=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = type_sequence_label_size
A__ = initializer_range
A__ = mask_ratio
A__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
A__ = (image_size // patch_size) ** 2
A__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
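    # Editor's note, using the defaults above: image_size=30 and patch_size=2 give
    # num_patches = (30 // 2) ** 2 = 225, so with mask_ratio=0.6 the expected
    # sequence length is int(math.ceil((1 - 0.6) * (225 + 1))) = 91 visible tokens
    # (the +1 accounts for the [CLS] token).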
def A (self ):
"""simple docstring"""
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = self.get_config()
return config, pixel_values, labels
def A (self ):
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def A (self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
A__ = ViTMAEModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A__ = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A (self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
A__ = ViTMAEForPreTraining(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A__ = model(lowerCamelCase__ )
A__ = (self.image_size // self.patch_size) ** 2
A__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
A__ = 1
A__ = ViTMAEForPreTraining(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ = model(lowerCamelCase__ )
A__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def A (self ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ ,A__ ,A__ = config_and_inputs
A__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
__lowerCamelCase = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
__lowerCamelCase = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
def A (self ):
"""simple docstring"""
A__ = ViTMAEModelTester(self )
A__ = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=3_7 )
def A (self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def A (self ):
"""simple docstring"""
pass
def A (self ):
"""simple docstring"""
A__ ,A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def A (self ):
"""simple docstring"""
A__ ,A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(lowerCamelCase__ )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def A (self ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def A (self ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__ )
def A (self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
# make masks reproducible
np.random.seed(2 )
A__ = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
A__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
A__ = torch.from_numpy(lowerCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
A__ = pt_noise
super().check_pt_tf_models(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def A (self ):
"""simple docstring"""
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def A (self ):
"""simple docstring"""
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def A (self ):
"""simple docstring"""
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def A (self ):
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A (self ):
"""simple docstring"""
pass
@slow
def A (self ):
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = ViTMAEModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
@cached_property
def A (self ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def A (self ):
"""simple docstring"""
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
A__ = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(lowerCamelCase__ )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=lowerCamelCase__ , return_tensors="""pt""" ).to(lowerCamelCase__ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
A__ = ViTMAEConfig()
A__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
A__ = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
A__ = model(**lowerCamelCase__ , noise=torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ ) )
# verify the logits
A__ = torch.Size((1, 1_9_6, 7_6_8) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
A__ = torch.tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowerCamelCase__ ) , atol=1E-4 ) )
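# Editor's note: the (1, 196, 768) logits shape asserted above follows from the
# facebook/vit-mae-base defaults: (224 // 16) ** 2 = 196 patches, each
# reconstructed as 16 * 16 * 3 = 768 pixel values. Passing an explicit `noise`
# of shape (1, num_patches) is what makes the random masking reproducible.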
| 574
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCamelCase ( metaclass=__snake_case):
__lowerCamelCase = ["torch", "torchsde"]
def __init__(self , *lowerCamelCase__ , **lowerCamelCase__ ):
"""simple docstring"""
requires_backends(self , ["""torch""", """torchsde"""] )
@classmethod
def A (cls , *lowerCamelCase__ , **lowerCamelCase__ ):
"""simple docstring"""
requires_backends(cls , ["""torch""", """torchsde"""] )
@classmethod
def A (cls , *lowerCamelCase__ , **lowerCamelCase__ ):
"""simple docstring"""
requires_backends(cls , ["""torch""", """torchsde"""] )
| 574
| 1
|
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_DESCRIPTION = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
_KWARGS_DESCRIPTION = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WikiSplit(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''') , id='''references'''),
}) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
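# Minimal usage sketch (editor's addition), mirroring the docstring example above:
#     sources = ["About 95 species are currently accepted ."]
#     predictions = ["About 95 you now get in ."]
#     references = [["About 95 species are currently known ."]]
#     compute_sari(sources, predictions, references)   # ~21.81
#     compute_sacrebleu(predictions, references)       # ~14.54
#     compute_em(predictions, references)              # 0.0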
| 413
|
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node prefix and a word."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1

        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edge whose prefix starts with the word's first character
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word and the node prefix only partially match
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)
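# Illustration (editor's note) of Case 4 in insert() above: after
# insert("banana"), insert("band") matches only "ban", so the "banana" child is
# re-rooted under a new "ban" node with children "ana" (leaf) and "d" (leaf).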
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    words = "banana bananas bandanas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
| 413
| 1
|
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
A_ : Optional[Any] = "\\n\n"
A_ : int = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
A_ : Any = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
'''simple docstring'''
def __UpperCamelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1_6 , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE=None ):
if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
if device == "gpu":
snake_case__ : int = """cuda"""
else:
snake_case__ : Any = """cuda""" if torch.cuda.is_available() else """cpu"""
snake_case__ : int = AutoModelForCausalLM.from_pretrained(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = model.to(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
snake_case__ : List[Any] = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(__SCREAMING_SNAKE_CASE ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
snake_case__ : Dict = model.config.max_length - 1
else:
snake_case__ : List[str] = model.config.max_length
snake_case__ : str = tokenizer(
__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , return_attention_mask=__SCREAMING_SNAKE_CASE , ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = encodings["""input_ids"""]
snake_case__ : Union[str, Any] = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
snake_case__ : int = []
snake_case__ : Optional[Any] = CrossEntropyLoss(reduction="""none""" )
for start_index in logging.tqdm(range(0 , len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) ):
snake_case__ : Optional[int] = min(start_index + batch_size , len(__SCREAMING_SNAKE_CASE ) )
snake_case__ : int = encoded_texts[start_index:end_index]
snake_case__ : Optional[Any] = attn_masks[start_index:end_index]
if add_start_token:
snake_case__ : Any = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : int = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
snake_case__ : Optional[Any] = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(__SCREAMING_SNAKE_CASE ), attn_mask] , dim=1 )
snake_case__ : Union[str, Any] = encoded_batch
with torch.no_grad():
snake_case__ : Any = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ).logits
snake_case__ : Optional[Any] = out_logits[..., :-1, :].contiguous()
snake_case__ : Tuple = labels[..., 1:].contiguous()
snake_case__ : Tuple = attn_mask[..., 1:].contiguous()
snake_case__ : Optional[Any] = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , __SCREAMING_SNAKE_CASE ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(__SCREAMING_SNAKE_CASE )}
| 38
|
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(4_2)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"

stages = [ZERO2, ZERO3]
def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
def _snake_case ( self , snake_case , snake_case ) -> Dict:
"""simple docstring"""
self.run_and_check(
stage=snake_case , model=snake_case , distributed=snake_case , fpaa=snake_case , )
@require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
def _snake_case ( self , snake_case , snake_case ) -> str:
"""simple docstring"""
self.run_and_check(
stage=snake_case , model=snake_case , distributed=snake_case , fpaa=snake_case , )
    @parameterized.expand(params, name_func=custom_name_func)
def _snake_case ( self , snake_case , snake_case ) -> List[str]:
"""simple docstring"""
self.run_and_check(
stage=snake_case , model=snake_case , distributed=snake_case , fpaa=snake_case , )
@require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
def _snake_case ( self , snake_case , snake_case ) -> List[str]:
"""simple docstring"""
self.run_and_check(
stage=snake_case , model=snake_case , distributed=snake_case , fpaa=snake_case , )
def _snake_case ( self , snake_case ) -> str:
"""simple docstring"""
pass
def _snake_case ( self , snake_case , snake_case , snake_case = 10 , snake_case = True , snake_case = True , snake_case = True , ) -> str:
"""simple docstring"""
a__ : Tuple = models[model]
a__ : int = self.run_trainer(
stage=snake_case , model_name=snake_case , eval_steps=snake_case , num_train_epochs=1 , distributed=snake_case , fpaa=snake_case , )
self.do_checks(snake_case )
return output_dir
def _snake_case ( self , snake_case , snake_case , snake_case = 10 , snake_case = 1 , snake_case = True , snake_case = True , ) -> Optional[Any]:
"""simple docstring"""
a__ : str = self.get_auto_remove_tmp_dir("./xxx" , after=snake_case )
a__ : List[Any] = F"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(snake_case )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
""".split()
if fpaa:
args.extend(["--fp16"] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
a__ : str = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
a__ : Dict = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
a__ : Optional[int] = self.get_launcher(snake_case )
a__ : Optional[int] = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(snake_case , env=self.get_env() )
return output_dir
def _snake_case ( self , snake_case=False ) -> List[Any]:
"""simple docstring"""
a__ : Union[str, Any] = min(2 , get_gpu_count() ) if distributed else 1
return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
| 112
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_clap""": [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapAudioConfig""",
"""ClapConfig""",
"""ClapTextConfig""",
],
"""processing_clap""": ["""ClapProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapModel""",
"""ClapPreTrainedModel""",
"""ClapTextModel""",
"""ClapTextModelWithProjection""",
"""ClapAudioModel""",
"""ClapAudioModelWithProjection""",
]
a__ = ["""ClapFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 712
|
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
a__ = logging.getLogger(__name__)
def lowercase ( SCREAMING_SNAKE_CASE__ : torch.nn.Module , SCREAMING_SNAKE_CASE__ : BnbQuantizationConfig , SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] = None , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Union[int, str, torch.device]]] = None , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : Optional[Dict[Union[int, str], Union[int, str]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, os.PathLike]] = None , SCREAMING_SNAKE_CASE__ : bool = False , ) -> int:
_snake_case : int = bnb_quantization_config.load_in_abit
_snake_case : Tuple = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
_snake_case : List[Any] = []
# custom device map
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and len(device_map.keys() ) > 1:
_snake_case : Tuple = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
_snake_case : Union[str, Any] = get_keys_to_not_convert(SCREAMING_SNAKE_CASE__ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(SCREAMING_SNAKE_CASE__ )
_snake_case : Union[str, Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
_snake_case : Optional[Any] = []
_snake_case : Dict = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(SCREAMING_SNAKE_CASE__ )
# compatibility with peft
_snake_case : Union[str, Any] = load_in_abit
_snake_case : Any = load_in_abit
_snake_case : Optional[int] = get_parameter_device(SCREAMING_SNAKE_CASE__ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
_snake_case : int = replace_with_bnb_layers(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , modules_to_not_convert=SCREAMING_SNAKE_CASE__ )
# convert param to the right dtype
_snake_case : Any = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
_snake_case : Union[str, Any] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
_snake_case : Any = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(SCREAMING_SNAKE_CASE__ ):
param.to(SCREAMING_SNAKE_CASE__ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
_snake_case : Optional[int] = replace_with_bnb_layers(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , modules_to_not_convert=SCREAMING_SNAKE_CASE__ )
_snake_case : List[Any] = get_quantized_model_device_map(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , max_memory=SCREAMING_SNAKE_CASE__ , no_split_module_classes=SCREAMING_SNAKE_CASE__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
_snake_case : Union[str, Any] = True
_snake_case : Any = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=SCREAMING_SNAKE_CASE__ , offload_state_dict=SCREAMING_SNAKE_CASE__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(SCREAMING_SNAKE_CASE__ , device_map=SCREAMING_SNAKE_CASE__ , offload_dir=SCREAMING_SNAKE_CASE__ )
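# A minimal usage sketch (hypothetical; upstream this function is accelerate's
# `load_and_quantize_model`, and the exact flag name on BnbQuantizationConfig is an
# assumption here):
#
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   with init_empty_weights():
#       empty_model = MyModel(config)  # hypothetical model class
#   model = load_and_quantize_model(empty_model, bnb_config, weights_location="/path/to/ckpt")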
def lowercase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Any=None ) -> List[Any]:
if device_map is None:
if torch.cuda.is_available():
_snake_case : Dict = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
_snake_case : int = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
_snake_case : Tuple = {}
_snake_case : List[str] = special_dtypes
_snake_case : int = no_split_module_classes
_snake_case : List[Any] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
_snake_case : Optional[int] = get_balanced_memory(
SCREAMING_SNAKE_CASE__ , low_zero=(device_map == """balanced_low_0""") , max_memory=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
_snake_case : str = max_memory
_snake_case : Optional[int] = infer_auto_device_map(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# check that we don't have any quantized modules on the cpu
_snake_case : List[str] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
_snake_case : Dict = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
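# A custom device_map, by contrast, is just a dict of module names to devices
# (hypothetical module names):
#
#   device_map = {"transformer": 0, "lm_head": "cpu"}
#
# Per the check above, cpu/disk placement of quantized modules raises for the stricter
# load flag (4-bit in the upstream source) and is merely logged for 8-bit.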
def lowercase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Any=None ) -> List[Any]:
if modules_to_not_convert is None:
_snake_case : Tuple = []
_snake_case , _snake_case : str = _replace_with_bnb_layers(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def lowercase ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Tuple=None , ) -> Optional[Any]:
_snake_case : List[str] = False
for name, module in model.named_children():
if current_key_name is None:
_snake_case : List[str] = []
current_key_name.append(SCREAMING_SNAKE_CASE__ )
if isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
_snake_case : int = """.""".join(SCREAMING_SNAKE_CASE__ )
_snake_case : List[str] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
_snake_case : Optional[int] = False
break
if proceed:
# Load the bnb module with empty weights and replace the `nn.Linear` module
if bnb_quantization_config.load_in_abit:
_snake_case : List[Any] = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=SCREAMING_SNAKE_CASE__ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
_snake_case : Any = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
_snake_case : List[str] = module.weight.data
if module.bias is not None:
_snake_case : List[Any] = module.bias.data
bnb_module.requires_grad_(SCREAMING_SNAKE_CASE__ )
setattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_snake_case : List[str] = True
if len(list(module.children() ) ) > 0:
_snake_case , _snake_case : Optional[int] = _replace_with_bnb_layers(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_snake_case : List[str] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def lowercase ( SCREAMING_SNAKE_CASE__ : str ) -> int:
# Create a copy of the model
with init_empty_weights():
_snake_case : Optional[Any] = deepcopy(SCREAMING_SNAKE_CASE__ ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
_snake_case : Tuple = find_tied_parameters(SCREAMING_SNAKE_CASE__ )
# For compatibility with Accelerate < 0.18
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
_snake_case : List[Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
_snake_case : Optional[Any] = sum(SCREAMING_SNAKE_CASE__ , [] )
_snake_case : Optional[Any] = len(SCREAMING_SNAKE_CASE__ ) > 0
# Check if it is a base model
_snake_case : str = False
if hasattr(SCREAMING_SNAKE_CASE__ , """base_model_prefix""" ):
_snake_case : List[Any] = not hasattr(SCREAMING_SNAKE_CASE__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
_snake_case : str = list(model.named_children() )
_snake_case : Dict = [list_modules[-1][0]]
# add last module together with tied weights
_snake_case : Optional[int] = set(SCREAMING_SNAKE_CASE__ ) - set(SCREAMING_SNAKE_CASE__ )
_snake_case : Dict = list(set(SCREAMING_SNAKE_CASE__ ) ) + list(SCREAMING_SNAKE_CASE__ )
# remove ".weight" from the keys
_snake_case : Union[str, Any] = [""".weight""", """.bias"""]
_snake_case : List[str] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
_snake_case : Optional[Any] = name.replace(SCREAMING_SNAKE_CASE__ , """""" )
filtered_module_names.append(SCREAMING_SNAKE_CASE__ )
return filtered_module_names
def lowercase ( SCREAMING_SNAKE_CASE__ : Dict ) -> Tuple:
for m in model.modules():
if isinstance(SCREAMING_SNAKE_CASE__ , bnb.nn.Linearabit ):
return True
return False
def lowercase ( SCREAMING_SNAKE_CASE__ : nn.Module ) -> Union[str, Any]:
return next(parameter.parameters() ).device
def lowercase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> Any:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 0 , dtype=SCREAMING_SNAKE_CASE__ , value=SCREAMING_SNAKE_CASE__ )
_snake_case : Optional[Any] = param_name
_snake_case : List[Any] = model
if "." in tensor_name:
_snake_case : str = tensor_name.split(""".""" )
for split in splits[:-1]:
_snake_case : Tuple = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''' )
_snake_case : Tuple = new_module
_snake_case : Dict = splits[-1]
# offload weights
_snake_case : List[str] = False
offload_weight(module._parameters[tensor_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index=SCREAMING_SNAKE_CASE__ )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , SCREAMING_SNAKE_CASE__ , index=SCREAMING_SNAKE_CASE__ , )
else:
offload_weight(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index=SCREAMING_SNAKE_CASE__ )
offload_weight(SCREAMING_SNAKE_CASE__ , param_name.replace("""weight""" , """SCB""" ) , SCREAMING_SNAKE_CASE__ , index=SCREAMING_SNAKE_CASE__ )
set_module_tensor_to_device(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , """meta""" , dtype=SCREAMING_SNAKE_CASE__ , value=torch.empty(*param.size() ) )
| 198
| 0
|
"""simple docstring"""
def lowercase_ ( _snake_case ,_snake_case ):
_validate_point(_snake_case )
_validate_point(_snake_case )
if len(_snake_case ) != len(_snake_case ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(a - b ) for a, b in zip(_snake_case ,_snake_case ) ) )
def lowercase_ ( _snake_case ):
if point:
if isinstance(_snake_case ,_snake_case ):
for item in point:
if not isinstance(_snake_case ,(int, float) ):
SCREAMING_SNAKE_CASE__ : int = (
"""Expected a list of numbers as input, found """
f'''{type(_snake_case ).__name__}'''
)
raise TypeError(_snake_case )
else:
SCREAMING_SNAKE_CASE__ : Tuple = f'''Expected a list of numbers as input, found {type(_snake_case ).__name__}'''
raise TypeError(_snake_case )
else:
raise ValueError("""Missing an input""" )
def lowercase_ ( _snake_case ,_snake_case ):
_validate_point(_snake_case )
_validate_point(_snake_case )
if len(_snake_case ) != len(_snake_case ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(x - y ) for x, y in zip(_snake_case ,_snake_case ) ) )
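# Worked example: for the points (1, 1) and (2, 2) the Manhattan distance is
# |1 - 2| + |1 - 2| = 2.0; both implementations above compute the same sum of
# coordinate-wise absolute differences.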
if __name__ == "__main__":
import doctest
doctest.testmod()
| 223
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : Any = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {
'facebook/data2vec-vision-base-ft': (
'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
),
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = '''data2vec-vision'''
def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=2_24 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=[3, 5, 7, 11] , SCREAMING_SNAKE_CASE__=[1, 2, 3, 6] , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=0.4 , SCREAMING_SNAKE_CASE__=2_56 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=2_55 , **SCREAMING_SNAKE_CASE__ , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = hidden_size
SCREAMING_SNAKE_CASE__ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE__ : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : str = intermediate_size
SCREAMING_SNAKE_CASE__ : Any = hidden_act
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : int = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Optional[int] = image_size
SCREAMING_SNAKE_CASE__ : List[Any] = patch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = num_channels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_mask_token
SCREAMING_SNAKE_CASE__ : Dict = use_absolute_position_embeddings
SCREAMING_SNAKE_CASE__ : List[Any] = use_relative_position_bias
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_shared_relative_position_bias
SCREAMING_SNAKE_CASE__ : Dict = layer_scale_init_value
SCREAMING_SNAKE_CASE__ : int = drop_path_rate
SCREAMING_SNAKE_CASE__ : List[Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
SCREAMING_SNAKE_CASE__ : int = out_indices
SCREAMING_SNAKE_CASE__ : str = pool_scales
# auxiliary head attributes (semantic segmentation)
SCREAMING_SNAKE_CASE__ : Any = use_auxiliary_head
SCREAMING_SNAKE_CASE__ : str = auxiliary_loss_weight
SCREAMING_SNAKE_CASE__ : str = auxiliary_channels
SCREAMING_SNAKE_CASE__ : Optional[Any] = auxiliary_num_convs
SCREAMING_SNAKE_CASE__ : Dict = auxiliary_concat_input
SCREAMING_SNAKE_CASE__ : Union[str, Any] = semantic_loss_ignore_index
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : List[Any] = version.parse('''1.11''' )
@property
def __magic_name__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __magic_name__ (self ) -> float:
"""simple docstring"""
return 1E-4
| 223
| 1
|
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def _a ( lowerCamelCase_ ):
return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def _a ( ):
snake_case : Union[str, Any] =ArgumentParser(
'''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=lowercase__ )
snake_case : List[str] =parser.add_subparsers(help='''datasets-cli command helpers''' )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(lowercase__ )
EnvironmentCommand.register_subcommand(lowercase__ )
TestCommand.register_subcommand(lowercase__ )
RunBeamCommand.register_subcommand(lowercase__ )
DummyDataCommand.register_subcommand(lowercase__ )
# Parse args
snake_case , snake_case : List[Any] =parser.parse_known_args()
if not hasattr(lowercase__ , '''func''' ):
parser.print_help()
exit(1 )
snake_case : Any =parse_unknown_args(lowercase__ )
# Run
snake_case : List[Any] =args.func(lowercase__ , **lowercase__ )
service.run()
if __name__ == "__main__":
main()
| 702
|
'''simple docstring'''
def _a ( lowerCamelCase_ = 1_00 ):
snake_case : List[Any] =0
snake_case : List[str] =0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
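# Sanity check via the closed forms sum(1..n) = n(n + 1)/2 and
# sum(i^2, 1..n) = n(n + 1)(2n + 1)/6: for n = 100 this is
# 5050**2 - 338350 = 25164150.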
if __name__ == "__main__":
print(f"{solution() = }")
| 136
| 0
|
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple ):
UpperCamelCase :int = []
for part_id in partition_order:
UpperCamelCase :int = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(SCREAMING_SNAKE_CASE__ ):
expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _A ( ):
UpperCamelCase :Tuple = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
UpperCamelCase :List[str] = spark.range(100 ).repartition(1 )
UpperCamelCase :List[Any] = Spark(SCREAMING_SNAKE_CASE__ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _A ( ):
UpperCamelCase :Optional[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
UpperCamelCase :Any = spark.range(10 ).repartition(2 )
UpperCamelCase :Optional[int] = [1, 0]
UpperCamelCase :Optional[Any] = _generate_iterable_examples(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Reverse the partitions.
UpperCamelCase :Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
UpperCamelCase , UpperCamelCase :Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _A ( ):
UpperCamelCase :Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
UpperCamelCase :Dict = spark.range(10 ).repartition(1 )
UpperCamelCase :Union[str, Any] = SparkExamplesIterable(SCREAMING_SNAKE_CASE__ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(SCREAMING_SNAKE_CASE__ ):
assert row_id == F'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _A ( ):
UpperCamelCase :Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
UpperCamelCase :Optional[Any] = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
UpperCamelCase :Dict = lambda SCREAMING_SNAKE_CASE__ : x.reverse()
UpperCamelCase :List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(SCREAMING_SNAKE_CASE__ , [2, 1, 0] )
UpperCamelCase :Any = SparkExamplesIterable(SCREAMING_SNAKE_CASE__ ).shuffle_data_sources(SCREAMING_SNAKE_CASE__ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(SCREAMING_SNAKE_CASE__ ):
UpperCamelCase , UpperCamelCase :Optional[int] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _A ( ):
UpperCamelCase :int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
UpperCamelCase :Union[str, Any] = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
UpperCamelCase :Dict = SparkExamplesIterable(SCREAMING_SNAKE_CASE__ ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
UpperCamelCase :Any = _get_expected_row_ids_and_row_dicts_for_partition_order(SCREAMING_SNAKE_CASE__ , [0, 2] )
for i, (row_id, row_dict) in enumerate(SCREAMING_SNAKE_CASE__ ):
UpperCamelCase , UpperCamelCase :str = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
UpperCamelCase :Any = SparkExamplesIterable(SCREAMING_SNAKE_CASE__ ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
UpperCamelCase :List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(SCREAMING_SNAKE_CASE__ , [1, 3] )
for i, (row_id, row_dict) in enumerate(SCREAMING_SNAKE_CASE__ ):
UpperCamelCase , UpperCamelCase :int = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _A ( ):
UpperCamelCase :Union[str, Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
UpperCamelCase :int = spark.range(100 ).repartition(1 )
UpperCamelCase :int = Spark(SCREAMING_SNAKE_CASE__ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 658
|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class UpperCAmelCase_ :
"""simple docstring"""
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
return None
class UpperCAmelCase_ :
"""simple docstring"""
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
return None
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Any =[
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def UpperCAmelCase ( self ) -> List[Any]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(SCREAMING_SNAKE_CASE_ , '''tf''' , 12 , **SCREAMING_SNAKE_CASE_ )
@require_torch
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(SCREAMING_SNAKE_CASE_ , '''pt''' , 12 , **SCREAMING_SNAKE_CASE_ )
@require_torch
@slow
def UpperCAmelCase ( self ) -> int:
from transformers import BertModel
UpperCamelCase :int = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
vocab_file.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
vocab_file.flush()
UpperCamelCase :Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
UpperCamelCase :Union[str, Any] = BertModel(BertConfig(vocab_size=len(SCREAMING_SNAKE_CASE_ ) ) )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
self._test_export(SCREAMING_SNAKE_CASE_ , '''pt''' , 12 , SCREAMING_SNAKE_CASE_ )
@require_tf
@slow
def UpperCAmelCase ( self ) -> str:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
UpperCamelCase :Tuple = self._test_export(SCREAMING_SNAKE_CASE_ , '''tf''' , 12 , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = quantize(Path(SCREAMING_SNAKE_CASE_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(SCREAMING_SNAKE_CASE_ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
@require_torch
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
UpperCamelCase :str = self._test_export(SCREAMING_SNAKE_CASE_ , '''pt''' , 12 , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = quantize(SCREAMING_SNAKE_CASE_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(SCREAMING_SNAKE_CASE_ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
try:
# Compute path
with TemporaryDirectory() as tempdir:
UpperCamelCase :Union[str, Any] = Path(SCREAMING_SNAKE_CASE_ ).joinpath('''model.onnx''' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
return path
except Exception as e:
self.fail(SCREAMING_SNAKE_CASE_ )
@require_torch
@require_tokenizers
@slow
def UpperCAmelCase ( self ) -> List[str]:
from transformers import BertModel
UpperCamelCase :List[Any] = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
UpperCamelCase :int = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''pt''' )
@require_tf
@require_tokenizers
@slow
def UpperCAmelCase ( self ) -> List[Any]:
from transformers import TFBertModel
UpperCamelCase :Optional[Any] = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
UpperCamelCase :Optional[Any] = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
self._test_infer_dynamic_axis(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''tf''' )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase :Tuple = FeatureExtractionPipeline(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :List[Any] = infer_shapes(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Assert all variables are present
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , SCREAMING_SNAKE_CASE_ )
self.assertSequenceEqual(variable_names[3:] , SCREAMING_SNAKE_CASE_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :int = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
UpperCamelCase :Tuple = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
UpperCamelCase , UpperCamelCase :Any = ensure_valid_input(FuncContiguousArgs() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(SCREAMING_SNAKE_CASE_ ) , set(SCREAMING_SNAKE_CASE_ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(SCREAMING_SNAKE_CASE_ , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
UpperCamelCase , UpperCamelCase :Tuple = ensure_valid_input(FuncNonContiguousArgs() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['''input_ids'''] )
self.assertEqual(ordered_input_names[0] , '''input_ids''' )
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :str = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' )
self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
| 658
| 1
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCamelCase ( SCREAMING_SNAKE_CASE ):
@staticmethod
@abstractmethod
def snake_case_ ( __snake_case : ArgumentParser ) -> Optional[Any]:
raise NotImplementedError()
@abstractmethod
def snake_case_ ( self : List[Any] ) -> int:
raise NotImplementedError()
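# A minimal subclass sketch (hypothetical command and base-class name; the two
# obfuscated methods above correspond to the usual `register_subcommand(parser)` /
# `run()` pair):
#
#   class HelloCommand(BaseCommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           sub = parser.add_parser("hello")
#           sub.set_defaults(func=lambda args: HelloCommand())
#       def run(self):
#           print("hello")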
| 249
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowerCamelCase_ ( UpperCamelCase_ ):
_a : Optional[Any] = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
_a : List[Any] = True if '''large''' in model_name or '''huge''' in model_name else False
_a : Optional[Any] = True if '''large''' in model_name or '''huge''' in model_name else False
_a : Any = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
_a : Any = [3, 3, 3, 3]
_a : Optional[int] = [5, 5, 5, 5]
elif "fl4" in model_name:
_a : List[str] = [4, 4, 4, 4]
_a : Optional[int] = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
_a : str = [3, 3, 3, 3]
if "lrf" in model_name:
_a : int = [3, 3, 3, 3]
else:
_a : str = [2, 2, 2, 2]
if "tiny" in model_name:
_a : Optional[int] = 96
elif "small" in model_name:
_a : Dict = 96
elif "base" in model_name:
_a : Any = 128
elif "large" in model_name:
_a : int = 192
elif "xlarge" in model_name:
_a : Optional[int] = 256
elif "huge" in model_name:
_a : str = 352
# set label information
_a : Any = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
_a : Optional[int] = '''imagenet-22k-id2label.json'''
else:
_a : Dict = '''imagenet-1k-id2label.json'''
_a : int = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type='''dataset''' ) , '''r''' ) )
_a : int = {int(UpperCamelCase_ ): v for k, v in idalabel.items()}
_a : Optional[int] = {v: k for k, v in idalabel.items()}
_a : Union[str, Any] = FocalNetConfig(
embed_dim=UpperCamelCase_ , depths=UpperCamelCase_ , focal_levels=UpperCamelCase_ , focal_windows=UpperCamelCase_ , use_conv_embed=UpperCamelCase_ , idalabel=UpperCamelCase_ , labelaid=UpperCamelCase_ , use_post_layernorm=UpperCamelCase_ , use_layerscale=UpperCamelCase_ , )
return config
def lowerCamelCase_ ( UpperCamelCase_ ):
if "patch_embed.proj" in name:
_a : Tuple = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_a : Dict = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
_a : Any = '''encoder.''' + name
if "encoder.layers" in name:
_a : int = name.replace('''encoder.layers''' , '''encoder.stages''' )
if "downsample.proj" in name:
_a : int = name.replace('''downsample.proj''' , '''downsample.projection''' )
if "blocks" in name:
_a : Optional[Any] = name.replace('''blocks''' , '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
_a : Dict = name.replace('''modulation.f''' , '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
_a : Any = name.replace('''modulation.h''' , '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
_a : Tuple = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
if name == "norm.weight":
_a : Optional[int] = '''layernorm.weight'''
if name == "norm.bias":
_a : List[str] = '''layernorm.bias'''
if "head" in name:
_a : List[str] = name.replace('''head''' , '''classifier''' )
else:
_a : str = '''focalnet.''' + name
return name
def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ):
# fmt: off
_a : str = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
_a : List[Any] = model_name_to_url[model_name]
print('''Checkpoint URL: ''' , UpperCamelCase_ )
_a : Optional[int] = torch.hub.load_state_dict_from_url(UpperCamelCase_ , map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
_a : int = state_dict.pop(UpperCamelCase_ )
_a : List[Any] = val
_a : List[str] = get_focalnet_config(UpperCamelCase_ )
_a : Dict = FocalNetForImageClassification(UpperCamelCase_ )
model.eval()
# load state dict
model.load_state_dict(UpperCamelCase_ )
# verify conversion
_a : int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_a : List[str] = BitImageProcessor(
do_resize=UpperCamelCase_ , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCamelCase_ , crop_size=224 , do_normalize=UpperCamelCase_ , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD , )
_a : Tuple = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
_a : str = processor(images=UpperCamelCase_ , return_tensors='''pt''' )
_a : Dict = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
_a : int = image_transforms(UpperCamelCase_ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , original_pixel_values , atol=1E-4 )
_a : Optional[Any] = model(**UpperCamelCase_ )
_a : int = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
_a : str = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
_a : Tuple = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
_a : List[Any] = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
_a : Optional[int] = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
_a : List[Any] = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
_a : List[Any] = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase_ )
processor.save_pretrained(UpperCamelCase_ )
if push_to_hub:
print(f"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(f"""{model_name}""" )
processor.push_to_hub(f"""{model_name}""" )
if __name__ == "__main__":
__UpperCAmelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
__UpperCAmelCase : Optional[int] = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 249
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 74
|
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = RobertaConfig
_lowerCamelCase = '''roberta'''
def __init__( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : str = RobertaEmbeddings(_lowercase )
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = RobertaConfig
_lowerCamelCase = '''roberta'''
def __init__( self , _lowercase ) -> List[Any]:
'''simple docstring'''
super().__init__(_lowercase )
snake_case_ : Optional[Any] = config.num_labels
snake_case_ : Dict = config.num_hidden_layers
snake_case_ : str = DeeRobertaModel(_lowercase )
snake_case_ : Dict = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : List[str] = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(_lowercase )
def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=-1 , _lowercase=False , ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = self.num_layers
try:
snake_case_ : int = self.roberta(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , )
snake_case_ : str = outputs[1]
snake_case_ : Union[str, Any] = self.dropout(_lowercase )
snake_case_ : Tuple = self.classifier(_lowercase )
snake_case_ : Dict = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : List[Any] = e.message
snake_case_ : Union[str, Any] = e.exit_layer
snake_case_ : Dict = outputs[0]
if not self.training:
snake_case_ : Dict = entropy(_lowercase )
snake_case_ : Optional[int] = []
snake_case_ : Union[str, Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : Dict = MSELoss()
snake_case_ : Dict = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Union[str, Any] = CrossEntropyLoss()
snake_case_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : int = []
for highway_exit in outputs[-1]:
snake_case_ : Tuple = highway_exit[0]
if not self.training:
highway_logits_all.append(_lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : Optional[int] = MSELoss()
snake_case_ : Optional[Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Optional[int] = CrossEntropyLoss()
snake_case_ : Union[str, Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_lowercase )
if train_highway:
snake_case_ : Dict = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : List[str] = (loss,) + outputs
if not self.training:
snake_case_ : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : Tuple = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
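# Note on the early-exit flow above: each "highway" attaches a classifier to an
# intermediate layer; at inference time the entropy of the exit logits is computed,
# and a sufficiently confident (low-entropy) exit raises HighwayException, which the
# except branch catches to recover that layer's logits and its exit_layer index.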
| 58
| 0
|
'''simple docstring'''
from collections import namedtuple
__UpperCAmelCase = namedtuple("""from_to""", """from_ to""")
__UpperCAmelCase = {
"""cubicmeter""": from_to(1, 1),
"""litre""": from_to(0.001, 1000),
"""kilolitre""": from_to(1, 1),
"""gallon""": from_to(0.00454, 264.172),
"""cubicyard""": from_to(0.76455, 1.30795),
"""cubicfoot""": from_to(0.028, 35.3147),
"""cup""": from_to(0.000236588, 4226.75),
}
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if from_type not in METRIC_CONVERSION:
raise ValueError(
f'''Invalid \'from_type\' value: {from_type!r}. Supported values are:\n'''
+ """, """.join(lowerCamelCase_ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
+ """, """.join(lowerCamelCase_ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
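# Worked example: converting 2 litres to gallons multiplies by the litre "from_"
# factor and the gallon "to" factor: 2 * 0.001 * 264.172 = 0.528344 gallons.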
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713
|
'''simple docstring'''
def __A ( lowerCamelCase_ ):
"""simple docstring"""
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : List[str] = number
while duplicate > 0:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = divmod(lowerCamelCase_ , 10 )
fact_sum += factorial(lowerCamelCase_ )
return fact_sum == number
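# Worked example: 145 is a Krishnamurthy number since 1! + 4! + 5! = 1 + 24 + 120 = 145.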
if __name__ == "__main__":
print("""Program to check whether a number is a Krisnamurthy Number or not.""")
__UpperCAmelCase = int(input("""Enter number: """).strip())
print(
f'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.'''
)
| 79
| 0
|
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase__ ( a , a , a ):
__snake_case = list(range(len(a ) ) )
__snake_case = [v / w for v, w in zip(a , a )]
index.sort(key=lambda i : ratio[i] , reverse=True )
__snake_case = 0
__snake_case = [0] * len(a )
for i in index:
if weight[i] <= capacity:
__snake_case = 1
max_value += value[i]
capacity -= weight[i]
else:
__snake_case = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
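# Worked example: value = [60, 100, 120], weight = [10, 20, 30], capacity = 50.
# The ratios are 6, 5 and 4, so the greedy order takes items 0 and 1 whole plus
# two thirds of item 2, giving a maximum value of 60 + 100 + 80 = 240.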
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356
|
'''simple docstring'''
def lowerCamelCase__ ( a ):
__snake_case = int(a )
if n_element < 1:
__snake_case = ValueError('n_element should be a positive number' )
raise my_error
__snake_case = [1]
__snake_case , __snake_case , __snake_case = (0, 0, 0)
__snake_case = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
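# Worked example: the first ten Hamming numbers (of the form 2^i * 3^j * 5^k) are
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12.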
if __name__ == "__main__":
_lowercase = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
_lowercase = hamming(int(n))
print("""-----------------------------------------------------""")
print(f'''The list with nth numbers is: {hamming_numbers}''')
print("""-----------------------------------------------------""")
| 356
| 1
|
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def __init__( self : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : int=2 , __UpperCamelCase : str=56 , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Any=99 , __UpperCamelCase : str=32 , __UpperCamelCase : Dict=2 , __UpperCamelCase : Dict=2 , __UpperCamelCase : Optional[Any]=7 , __UpperCamelCase : str="gelu_new" , __UpperCamelCase : Optional[Any]=0.1 , __UpperCamelCase : str=0.1 , __UpperCamelCase : Optional[int]=512 , __UpperCamelCase : Optional[int]=16 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : Any=0.02 , __UpperCamelCase : str=4 , __UpperCamelCase : List[Any]="block_sparse" , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : str=2 , __UpperCamelCase : Tuple=3 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_attention_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_choices
_UpperCAmelCase = rescale_embeddings
_UpperCAmelCase = attention_type
_UpperCAmelCase = use_bias
_UpperCAmelCase = block_size
_UpperCAmelCase = num_random_blocks
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_attention_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self : Any ):
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_flax
class __SCREAMING_SNAKE_CASE ( lowercase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : Optional[Any] = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
__SCREAMING_SNAKE_CASE : str = False
__SCREAMING_SNAKE_CASE : Dict = False
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCAmelCase__ ( self : Tuple ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCAmelCase__ ( self : List[str] ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCAmelCase__ ( self : Optional[Any] ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCAmelCase__ ( self : Dict ):
super().test_hidden_states_output()
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained("google/bigbird-roberta-base" )
self.assertIsNotNone(__UpperCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = model_class(__UpperCamelCase )
@jax.jit
def model_jitted(__UpperCamelCase : Dict , __UpperCamelCase : Optional[int]=None , **__UpperCamelCase : List[Any] ):
return model(input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase , **__UpperCamelCase )
with self.subTest("JIT Enabled" ):
_UpperCAmelCase = model_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_UpperCAmelCase = model_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any]=1e-5 , __UpperCamelCase : Dict="outputs" , __UpperCamelCase : Any=None ):
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
# an effort was done to return `attention_probs` (yet to be verified).
if name.startswith("outputs.attentions" ):
return
else:
super().check_pt_flax_outputs(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
| 129
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    # keep the fitted scaler so predictions can be mapped back to the original scale
    scaler = MinMaxScaler()
    actual_data = scaler.fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64))  # the second layer infers its input shape from the previous one
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    prediction = model.predict(x_test)
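    # Illustrative follow-up (not in the original script): predictions are still in the
    # scaler's [0, 1] range, so map them back to prices before inspecting them. This
    # relies on the fitted `scaler` instance kept above.
    predicted_prices = scaler.inverse_transform(prediction.reshape(-1, 1)).reshape(prediction.shape)
    actual_prices = scaler.inverse_transform(y_test.reshape(-1, 1)).reshape(y_test.shape)
    print(predicted_prices[:2], actual_prices[:2])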
| 129
| 1
|
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
class DualTransformer2DModel(nn.Module):
    """Two parallel Transformer2DModel blocks whose outputs are mixed at inference time."""
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm)
                for _ in range(2)
            ]
        )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return Transformer2DModelOutput(sample=output_states)
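# Illustrative sketch (not part of this module): a pipeline is expected to configure the
# mixing variables before the forward pass. The attribute names mirror those defined above;
# the tensors are assumptions chosen to match condition_lengths = [77, 257].
#
#     model.mix_ratio = 0.5
#     model.condition_lengths = [77, 257]
#     model.transformer_index_for_condition = [1, 0]
#     out = model(hidden_states, encoder_hidden_states=torch.cat([text_tokens, image_tokens], dim=1))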
| 142
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_dpt'] = ['DPTFeatureExtractor']
    _import_structure['image_processing_dpt'] = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_dpt'] = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
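# Usage note (illustrative): with the `_LazyModule` pattern above, importing the package is
# cheap, and the heavy torch/vision submodules are only loaded when an attribute is first
# accessed, e.g.:
#
#     from transformers import DPTImageProcessor, DPTForDepthEstimation  # triggers the real imports lazily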
| 246
| 0
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)
        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=False
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)
                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))
    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
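# Minimal sketch of the pattern these tests exercise (illustrative, assuming TF and
# tensorflow_text are installed): an in-graph tokenizer lets a saved model accept raw strings.
#
#     tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
#     model = ModelToSave(tokenizer=tf_tokenizer)
#     pooled = model(tf.constant(["Raw strings go straight into the graph."]))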
| 713
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
_DESCRIPTION = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
_KWARGS_DESCRIPTION = "\nCalculates coreference evaluation metrics.\nArgs:\n    predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n        Each prediction is a word with its annotations as a string made of columns joined with spaces.\n        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n        See the details on the format in the description of the metric.\n    references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n        Each reference is a word with its annotations as a string made of columns joined with spaces.\n        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n        See the details on the format in the description of the metric.\n    keep_singletons: After extracting all mentions of key or system files,\n        mentions whose corresponding coreference chain is of size one,\n        are considered as singletons. The default evaluation mode will include\n        singletons in evaluations if they are included in the key or the system files.\n        By setting 'keep_singletons=False', all singletons in the key and system files\n        will be excluded from the evaluation.\n    NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n        leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n    min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n        Minimum spans are determined using the MINA algorithm.\n\nReturns:\n    'mentions': mentions\n    'muc': MUC metric [Vilain et al, 1995]\n    'bcub': B-cubed [Bagga and Baldwin, 1998]\n    'ceafe': CEAFe [Luo et al., 2005]\n    'lea': LEA [Moosavi and Strube, 2016]\n    'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n    >>> coval = datasets.load_metric('coval')\n    >>> words = ['bc/cctv/00/cctv_0005   0   0       Thank   VBP  (TOP(S(VP*    thank  01   1    Xu_li  *           (V*)        *       -',\n    ... 'bc/cctv/00/cctv_0005   0   1         you   PRP        (NP*)      -    -   -    Xu_li  *        (ARG1*)   (ARG0*)   (116)',\n    ... 'bc/cctv/00/cctv_0005   0   2    everyone    NN        (NP*)      -    -   -    Xu_li  *    (ARGM-DIS*)        *    (116)',\n    ... 'bc/cctv/00/cctv_0005   0   3         for    IN        (PP*       -    -   -    Xu_li  *        (ARG2*         *       -',\n    ... 'bc/cctv/00/cctv_0005   0   4    watching   VBG   (S(VP*))))   watch  01   1    Xu_li  *             *)      (V*)       -',\n    ... 'bc/cctv/00/cctv_0005   0   5           .     .          *))      -    -   -    Xu_li  *             *         *       -']\n    >>> references = [words]\n    >>> predictions = [words]\n    >>> results = coval.compute(predictions=predictions, references=references)\n    >>> print(results) # doctest:+ELLIPSIS\n    {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            'Number of removed nested coreferring mentions in the key '
            f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
        logger.info(
            'Number of resulting singleton clusters in the key '
            f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
    if not keep_singletons:
        logger.info(
            f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
            'files, respectively' )
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": f1})
        logger.info(
            name.ljust(10), f"""Recall: {recall * 100:.2f}""", f""" Precision: {precision * 100:.2f}""", f""" F1: {f1 * 100:.2f}""", )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"""CoNLL score: {conll:.2f}""")
        output_scores.update({'conll_score': conll})
    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('#'):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Sequence(datasets.Value('string' ) ),
} ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] , )
    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        metrics = [
            ('mentions', evaluator.mentions),
            ('muc', evaluator.muc),
            ('bcub', evaluator.b_cubed),
            ('ceafe', evaluator.ceafe),
            ('lea', evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.')
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references, sys_lines=predictions, metrics=metrics, NP_only=NP_only, remove_nested=remove_nested, keep_singletons=keep_singletons, min_span=min_span, )
return score
| 651
| 0
|
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]
    def is_too_big(strang):
        return tok(strang, return_tensors="""pt""").input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + """ """ + src
        cand_tgt = new_tgt + """ """ + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"""packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.""")
        Path(save_path / f"""{split}.source""").open("""w""").write("""\n""".join(packed_src))
        Path(save_path / f"""{split}.target""").open("""w""").write("""\n""".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
        shutil.copyfile(src_path, save_path / f"""{split}.source""")
        shutil.copyfile(tgt_path, save_path / f"""{split}.target""")
def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("""--tok_name""", type=str, help="""like facebook/bart-large-cnn,t5-base, etc.""")
    parser.add_argument("""--max_seq_len""", type=int, default=128)
    parser.add_argument("""--data_dir""", type=str)
    parser.add_argument("""--save_path""", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
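# Example invocation (illustrative; assumes this file is saved as pack_dataset.py and the
# paths are placeholders):
#
#     python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 1024 \
#         --data_dir ./cnn_dm --save_path ./cnn_dm_packed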
| 196
|
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"""filename""": ["""my_name-train""" + """_""" + str(x) for x in np.arange(30).tolist()]})
        return dset
    def test_add_faiss_index(self):
        import faiss
        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True)
        dset = dset.add_faiss_index("""vecs""", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("""vecs""", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["""filename"""][0], """my_name-train_29""")
        dset.drop_index("""vecs""")
    def test_add_faiss_index_from_external_arrays(self):
        import faiss
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="""vecs""", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT, )
        scores, examples = dset.get_nearest_examples("""vecs""", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["""filename"""][0], """my_name-train_29""")
    def test_serialization(self):
        import faiss
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="""vecs""", metric_type=faiss.METRIC_INNER_PRODUCT, )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("""vecs""", tmp_file.name)
            dset.load_faiss_index("""vecs2""", tmp_file.name)
        os.unlink(tmp_file.name)
        scores, examples = dset.get_nearest_examples("""vecs2""", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["""filename"""][0], """my_name-train_29""")
    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="""vecs""")
        dset.drop_index("""vecs""")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, """vecs2""", np.ones(5, dtype=np.float32)))
    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch
        dset = self._create_dummy_dataset()
        with patch("""elasticsearch.Elasticsearch.search""") as mocked_search, patch(
            """elasticsearch.client.IndicesClient.create""") as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""") as mocked_bulk:
            mocked_index_create.return_value = {"""acknowledged""": True}
            mocked_bulk.return_value = [(True, None)] * 30
            mocked_search.return_value = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("""filename""", es_client=es_client)
            scores, examples = dset.get_nearest_examples("""filename""", """my_name-train_29""")
            self.assertEqual(examples["""filename"""][0], """my_name-train_29""")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)
        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)
    def test_factory(self):
        import faiss
        index = FaissIndex(string_factory="""Flat""")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="""LSH""")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="""Flat""", custom_index=faiss.IndexFlat(5))
    def test_custom(self):
        import faiss
        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
    def test_serialization(self):
        import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    index_name = """index.faiss"""
    path = f"""mock://{index_name}"""
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch
        with patch("""elasticsearch.Elasticsearch.search""") as mocked_search, patch(
            """elasticsearch.client.IndicesClient.create""") as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"""acknowledged""": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(["""foo""", """bar""", """foobar"""])
            # single query
            query = """foo"""
            mocked_search.return_value = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)
            # single query with timeout
            query = """foo"""
            mocked_search.return_value = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)
            # batched queries
            queries = ["""foo""", """bar""", """foobar"""]
            mocked_search.return_value = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
            # batched queries with timeout
            queries = ["""foo""", """bar""", """foobar"""]
            mocked_search.return_value = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
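# Minimal sketch of the FAISS workflow these tests exercise (illustrative, reusing the
# toy 5-dimensional vectors from above):
#
#     index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
#     index.add_vectors(np.eye(5, dtype=np.float32))
#     scores, indices = index.search(np.ones(5, dtype=np.float32))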
| 196
| 1
|
def apply_table(inp, table):
    # Table entries are 1-based positions into the input string.
    res = ''''''
    for i in table:
        res += inp[i - 1]
    return res
def left_shift(data):
    # Circular left shift by one position.
    return data[1:] + data[0]
def xor(a, b):
    # Bitwise XOR of two equal-length bit strings.
    res = ''''''
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]
def function(expansion, s0, s1, key, message):
    # One Feistel round; reads the module-level `p4_table` defined below.
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = '''0''' * (2 - len(l)) + l  # noqa: E741
    r = '''0''' * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
    key = input('''Enter 10 bit key: ''')
    message = input('''Enter 8 bit message: ''')
    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)
    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print('''Cipher text is:''', CT)
    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print('''Plain text after decypting is:''', PT)
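    # Sanity check (an illustrative addition, not part of the original cipher): apply_table
    # reads 1-based positions from its table, so [2, 4, 3, 1] maps "abcd" to "bdca".
    assert apply_table("abcd", [2, 4, 3, 1]) == "bdca"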
| 703
|
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)
if __name__ == "__main__":
    print(f'''{price_plus_tax(100, 0.25) = }''')
    print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 530
| 0
|
"""simple docstring"""
def bfs(graph, s, t, parent):
    # Return True if the sink is still reachable from the source in the residual graph.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS to store the augmenting path
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float('Inf')
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
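# For this capacity matrix (the classic max-flow example network from CLRS), the expected
# printed maximum flow is 23.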
| 174
|
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow log noise
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 174
| 1
|
"""simple docstring"""
MORSE_CODE_DICT = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
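# Worked example (illustrative): encrypt("SOS") returns "... --- ..." and
# decrypt("... --- ...") returns "SOS", so the two functions round-trip.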
| 518
|
"""simple docstring"""
def prefix_function(input_string: str) -> list:
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix(input_string: str) -> int:
    return max(prefix_function(input_string))
if __name__ == "__main__":
import doctest
doctest.testmod()
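# Worked example (illustrative): for "aabcdaabc" the prefix function is
# [0, 1, 0, 0, 0, 1, 2, 3, 4], so longest_prefix("aabcdaabc") == 4 -- the
# border "aabc" is both a proper prefix and a suffix of the string.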
| 518
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"MBartForCausalLM",
"MBartForConditionalGeneration",
"MBartForQuestionAnswering",
"MBartForSequenceClassification",
"MBartModel",
"MBartPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"TFMBartForConditionalGeneration",
"TFMBartModel",
"TFMBartPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"FlaxMBartForConditionalGeneration",
"FlaxMBartForQuestionAnswering",
"FlaxMBartForSequenceClassification",
"FlaxMBartModel",
"FlaxMBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 181
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 92
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_nezha'] = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 700
| 0
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """
    model_name_or_path: Optional[str] = field(
        default=None, metadata={
            'help': (
                'The model checkpoint for weights initialization. Leave None if you want to train a model from'
                ' scratch.'
            )
        }, )
    model_type: Optional[str] = field(
        default=None, metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)}, )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    train_data_file: Optional[str] = field(
        default=None, metadata={'help': 'The input training data file (a text file).'})
    train_data_files: Optional[str] = field(
        default=None, metadata={
            'help': (
                'The input training data files (multiple files in glob format). '
                'Very often splitting large files to smaller files can prevent tokenizer going out of memory'
            )
        }, )
    eval_data_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'}, )
    train_ref_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'}, )
    eval_ref_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'}, )
    line_by_line: bool = field(
        default=False, metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'}, )
    mlm: bool = field(
        default=False, metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'})
    whole_word_mask: bool = field(default=False, metadata={'help': 'Whether ot not to use whole word mask.'})
    mlm_probability: float = field(
        default=0.15, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'})
    plm_probability: float = field(
        default=1 / 6, metadata={
            'help': (
                'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
                ' modeling.'
            )
        }, )
    max_span_length: int = field(
        default=5, metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'})
    block_size: int = field(
        default=-1, metadata={
            'help': (
                'Optional input sequence length after tokenization.'
                'The training dataset will be truncated in block of this size for training.'
                'Default to the model max input length for single sentence inputs (take into account special tokens).'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
def get_dataset(args: DataTrainingArguments, tokenizer: PreTrainedTokenizer, evaluate: bool = False, cache_dir: Optional[str] = None, ):
    """simple docstring"""
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path, )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache, cache_dir=cache_dir, )
    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    """Entry point: parse arguments, build model/tokenizer/datasets, then train and evaluate."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument.")
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome.")
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed
    set_seed(training_args.seed)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name")
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)
    model.resize_token_embeddings(len(tokenizer))
    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            " --mlm flag (masked language modeling).")
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)
    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer, plm_probability=data_args.plm_probability, max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability)
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )
    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}
        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))
        results.update(result)
    return results
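

# Side note (illustrative, not from the original script): the perplexity reported above
# is simply exp of the mean eval cross-entropy, so a loss of ln(10) means perplexity 10.
assert abs(math.exp(math.log(10.0)) - 10.0) < 1e-9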
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 565
|
'''simple docstring'''
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt: return True if `pattern` occurs in `text`."""
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Compute the KMP failure array: failure[k] is the length of the longest proper
    prefix of pattern[: k + 1] that is also a suffix of it."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
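
    # Extra illustrative check (not in the original tests): KMP must agree with
    # Python's built-in substring search on random inputs.
    import random

    for _ in range(100):
        text = "".join(random.choice("ab") for _ in range(30))
        pattern = "".join(random.choice("ab") for _ in range(3))
        assert kmp(pattern, text) == (pattern in text)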
| 466
| 0
|
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def bamb(x):
    """Convert a byte count to (whole) megabytes."""
    return int(x / 2**20)


class TorchTracemalloc:
    """Context manager that measures CUDA memory allocated between enter and exit."""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased", n_train: int = 320, n_val: int = 160):
    """Build truncated train/eval dataloaders for GLUE MRPC."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"})

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    """Train for `num_epochs` epochs while tracking peak GPU memory per epoch."""
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps)
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)))
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
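
# A minimal, self-contained sketch (not part of the original script) of the
# gradient-accumulation pattern used in training_function above: scaling each
# micro-batch loss by the number of accumulation steps makes the summed gradient
# match a single large-batch step.
#
#     model = torch.nn.Linear(4, 1)
#     optimizer = AdamW(model.parameters(), lr=1e-3)
#     accumulation_steps = 4
#     for step, (x, y) in enumerate(micro_batches):  # `micro_batches` is hypothetical
#         loss = loss_fn(model(x), y) / accumulation_steps
#         loss.backward()  # gradients add up across micro-batches
#         if (step + 1) % accumulation_steps == 0:
#             optimizer.step()
#             optimizer.zero_grad()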
| 708
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
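
# For context (illustrative, not the actual transformers implementation): the lazy
# pattern above can be approximated with PEP 562 module-level __getattr__:
#
#     import importlib
#
#     def __getattr__(name):
#         module = importlib.import_module(".modeling_xlm_roberta_xl", __name__)
#         return getattr(module, name)
#
# _LazyModule does the same job generically, driven by the _import_structure mapping.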
| 389
| 0
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    """Build train/eval dataloaders for GLUE MRPC."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    """Run evaluation and return the accuracy, de-duplicating the last distributed batch."""
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
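

# Why the last-batch trim above matters (illustrative arithmetic, not original code):
# distributed samplers pad the dataset so every process sees the same number of
# samples, so the final gather can contain duplicates that must be sliced off.
_demo_dataset_len, _demo_samples_seen = 10, 8
_demo_final_gather = list(range(4))  # 4 gathered predictions, only 2 of them real
assert _demo_final_gather[: _demo_dataset_len - _demo_samples_seen] == [0, 1]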
def training_function(config, args):
    """Train with per-epoch checkpointing; optionally resume and verify a saved state."""
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps)
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizer's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script with checkpointing.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
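
# Compact alternative (illustrative) to the digit-scanning loop in training_function
# for recovering the epoch number from a checkpoint folder name:
#
#     import re
#
#     def epoch_from_checkpoint(path: str) -> int:
#         match = re.search(r"epoch_(\d+)", path)
#         if match is None:
#             raise ValueError(f"no epoch number in {path!r}")
#         return int(match.group(1))
#
#     assert epoch_from_checkpoint("outputs/epoch_12") == 12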
| 80
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/swinv2-tiny-patch4-window8-256""": (
"""https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"""
),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
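

# Quick numeric check of the hidden_size rule above (illustrative): with the default
# embed_dim=96 and four stages, the channel dimension after the last stage is
# 96 * 2**(4 - 1) = 768.
assert int(96 * 2 ** (4 - 1)) == 768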
| 420
| 0
|
'''simple docstring'''
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Compute (base ** exponent) % modulo_value by recursive squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Project Euler 188: the last `digits` digits of the hyperexponentiation of base by height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
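
    # Illustrative sanity check (not in the original): _modexpt agrees with Python's
    # built-in three-argument pow on the default parameters.
    assert _modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)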
| 708
|
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config
def A_ (self ) -> Union[str, Any]:
UpperCamelCase_ : Union[str, Any] = 10
UpperCamelCase_ : str = self.get_scheduler_config()
UpperCamelCase_ : List[str] = self.scheduler_classes[0](**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
UpperCamelCase_ : Optional[Any] = scheduler.timesteps[0]
UpperCamelCase_ : Union[str, Any] = scheduler.timesteps[1]
UpperCamelCase_ : Dict = self.dummy_sample
UpperCamelCase_ : Tuple = 0.1 * sample
UpperCamelCase_ : int = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
UpperCamelCase_ : str = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def A_ (self ) -> Dict:
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def A_ (self ) -> Union[str, Any]:
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=__UpperCamelCase )
def A_ (self ) -> int:
UpperCamelCase_ : int = self.scheduler_classes[0]
UpperCamelCase_ : Any = self.get_scheduler_config()
UpperCamelCase_ : int = scheduler_class(**__UpperCamelCase )
UpperCamelCase_ : Optional[int] = 1
scheduler.set_timesteps(__UpperCamelCase )
UpperCamelCase_ : Tuple = scheduler.timesteps
UpperCamelCase_ : Union[str, Any] = torch.manual_seed(0 )
UpperCamelCase_ : Optional[Any] = self.dummy_model()
UpperCamelCase_ : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(__UpperCamelCase ):
# 1. scale model input
UpperCamelCase_ : Optional[int] = scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# 2. predict noise residual
UpperCamelCase_ : int = model(__UpperCamelCase , __UpperCamelCase )
# 3. predict previous sample x_t-1
UpperCamelCase_ : List[str] = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase ).prev_sample
UpperCamelCase_ : Optional[Any] = pred_prev_sample
UpperCamelCase_ : List[Any] = torch.sum(torch.abs(__UpperCamelCase ) )
UpperCamelCase_ : List[str] = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 192.7_614 ) < 1E-2
assert abs(result_mean.item() - 0.2_510 ) < 1E-3
def A_ (self ) -> int:
UpperCamelCase_ : Tuple = self.scheduler_classes[0]
UpperCamelCase_ : List[str] = self.get_scheduler_config()
UpperCamelCase_ : List[Any] = scheduler_class(**__UpperCamelCase )
UpperCamelCase_ : Any = [106, 0]
scheduler.set_timesteps(timesteps=__UpperCamelCase )
UpperCamelCase_ : Optional[int] = scheduler.timesteps
UpperCamelCase_ : Any = torch.manual_seed(0 )
UpperCamelCase_ : Any = self.dummy_model()
UpperCamelCase_ : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
UpperCamelCase_ : int = scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# 2. predict noise residual
UpperCamelCase_ : List[Any] = model(__UpperCamelCase , __UpperCamelCase )
# 3. predict previous sample x_t-1
UpperCamelCase_ : str = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase ).prev_sample
UpperCamelCase_ : str = pred_prev_sample
UpperCamelCase_ : str = torch.sum(torch.abs(__UpperCamelCase ) )
UpperCamelCase_ : Union[str, Any] = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 347.6_357 ) < 1E-2
assert abs(result_mean.item() - 0.4_527 ) < 1E-3
def A_ (self ) -> Union[str, Any]:
UpperCamelCase_ : Any = self.scheduler_classes[0]
UpperCamelCase_ : List[Any] = self.get_scheduler_config()
UpperCamelCase_ : List[str] = scheduler_class(**__UpperCamelCase )
UpperCamelCase_ : Optional[Any] = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError, msg="""`timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=__UpperCamelCase )
def A_ (self ) -> Union[str, Any]:
UpperCamelCase_ : Tuple = self.scheduler_classes[0]
UpperCamelCase_ : Union[str, Any] = self.get_scheduler_config()
UpperCamelCase_ : Any = scheduler_class(**__UpperCamelCase )
UpperCamelCase_ : List[Any] = [39, 30, 12, 1, 0]
UpperCamelCase_ : str = len(__UpperCamelCase )
        with self.assertRaises(ValueError, msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase )
def A_ (self ) -> Tuple:
UpperCamelCase_ : List[str] = self.scheduler_classes[0]
UpperCamelCase_ : Any = self.get_scheduler_config()
UpperCamelCase_ : List[str] = scheduler_class(**__UpperCamelCase )
UpperCamelCase_ : Optional[Any] = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=__UpperCamelCase )
| 138
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 74
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    _lowerCamelCase = KandinskyV22Pipeline
_lowerCamelCase = [
"""image_embeds""",
"""negative_image_embeds""",
]
_lowerCamelCase = ["""image_embeds""", """negative_image_embeds"""]
_lowerCamelCase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_lowerCamelCase = False
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return 32
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return 32
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return self.time_input_dim
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return 100
@property
def UpperCamelCase__( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A : Optional[Any] = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        __A : Optional[Any] = UNet2DConditionModel(**__lowerCamelCase )
return model
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase__( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A : Tuple = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[Any] = self.dummy_unet
__A : Optional[int] = self.dummy_movq
__A : List[Any] = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.00085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type='''epsilon''' , thresholding=False , )
__A : Optional[int] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase=0 ):
'''simple docstring'''
__A : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
__A : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowerCamelCase )
if str(__lowerCamelCase ).startswith('''mps''' ):
__A : Optional[Any] = torch.manual_seed(__lowerCamelCase )
else:
__A : Dict = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
__A : Dict = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def UpperCamelCase__( self ):
'''simple docstring'''
__A : str = '''cpu'''
__A : str = self.get_dummy_components()
__A : Dict = self.pipeline_class(**__lowerCamelCase )
__A : Tuple = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
__A : List[str] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
__A : Tuple = output.images
__A : Tuple = pipe(
**self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0]
__A : Dict = image[0, -3:, -3:, -1]
__A : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
        __A : Optional[int] = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
def UpperCamelCase__( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' )
        __A : Optional[Any] = KandinskyV22PriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16 )
pipe_prior.to(__lowerCamelCase )
        __A : int = KandinskyV22Pipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.float16 )
__A : Union[str, Any] = pipeline.to(__lowerCamelCase )
pipeline.set_progress_bar_config(disable=__lowerCamelCase )
__A : List[Any] = '''red cat, 4k photo'''
__A : Union[str, Any] = torch.Generator(device='''cuda''' ).manual_seed(0 )
__A , __A : List[str] = pipe_prior(
__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
__A : Any = torch.Generator(device='''cuda''' ).manual_seed(0 )
__A : Optional[int] = pipeline(
image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , output_type='''np''' , )
__A : Union[str, Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
| 177
| 0
|
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer")
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer")
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts,
        tgt_texts=None,
        max_length=None,
        max_target_length=None,
        padding="longest",
        return_tensors=None,
        truncation=True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
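

# Hedged usage sketch (model id illustrative): __call__ routes through whichever
# sub-tokenizer is currently active, so input and target texts can share one object.
#
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#     inputs = tokenizer("who holds the record in 100m freestyle?", return_tensors="pt")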
| 676
|
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Log WER/CER for the predictions and optionally write predictions/targets to disk."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Normalize a transcript: lowercase, strip ignored characters, collapse whitespace."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))
    return text
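

# Worked example of the normalization above (illustrative):
#   "Hello, WORLD!" -> lowercase -> strip ignored chars -> "hello world"
assert normalize_text("Hello, WORLD!") == "hello world"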
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
UpperCAmelCase__ : str = parser.parse_args()
main(args)
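
    # Example invocation (dataset/model ids illustrative):
    #   python eval.py --model_id facebook/wav2vec2-base-960h \
    #       --dataset mozilla-foundation/common_voice_8_0 --config en --split test --log_outputs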
| 676
| 1
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
A_ = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
'markers' ,'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
config.addinivalue_line(
'markers' ,'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
config.addinivalue_line('markers' ,'is_pipeline_test: mark test to run only when pipelines are tested' )
config.addinivalue_line('markers' ,'is_staging_test: mark test to run only in the staging environment' )
config.addinivalue_line('markers' ,'accelerate_tests: mark test that require accelerate' )
config.addinivalue_line('markers' ,'tool_tests: mark the tool tests that are run on their specific schedule' )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
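
# With the checker above, a doctest can opt out of output comparison (illustrative):
#
#     >>> noisy_setup_call()  # doctest: +IGNORE_RESULT
#     <SomeObject at 0x7f...>
#
# The flag makes check_output return True regardless of what the call printed.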
| 42
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}")

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
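

# Minimal illustration (not from the original script) of the masking arithmetic used
# above: a binary mask zeroes out low-magnitude weights elementwise. The real
# MagnitudeBinarizer keeps a top fraction of weights rather than a fixed cut.
_demo_w = torch.tensor([0.5, -0.01, 0.2, -0.3])
_demo_mask = (_demo_w.abs() >= 0.1).float()
assert torch.equal(_demo_w * _demo_mask, torch.tensor([0.5, 0.0, 0.2, -0.3]))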
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
    args = parser.parse_args()
main(args)
| 415
| 0
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
_lowerCamelCase = ViTImageProcessor if is_vision_available() else None
@property
def lowerCAmelCase__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a single PIL test image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
def lowerCAmelCase__ ( self ):
__magic_name__ = self.get_tokenizer()
__magic_name__ = self.get_image_processor()
__magic_name__ = MgpstrProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor.save_pretrained(self.tmpdirname )
__magic_name__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase_ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
__magic_name__ = self.get_tokenizer()
__magic_name__ = self.get_image_processor()
__magic_name__ = MgpstrProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor.save_pretrained(self.tmpdirname )
__magic_name__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__magic_name__ = self.get_image_processor(do_normalize=UpperCamelCase_ , padding_value=1.0 )
__magic_name__ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase_ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
__magic_name__ = self.get_image_processor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = MgpstrProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__magic_name__ = self.prepare_image_inputs()
__magic_name__ = image_processor(UpperCamelCase_ , return_tensors='''np''' )
__magic_name__ = processor(images=UpperCamelCase_ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase__ ( self ):
__magic_name__ = self.get_image_processor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = MgpstrProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__magic_name__ = '''test'''
__magic_name__ = processor(text=UpperCamelCase_ )
__magic_name__ = tokenizer(UpperCamelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self ):
__magic_name__ = self.get_image_processor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = MgpstrProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__magic_name__ = '''test'''
__magic_name__ = self.prepare_image_inputs()
__magic_name__ = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def lowerCAmelCase__ ( self ):
__magic_name__ = self.get_image_processor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = MgpstrProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__magic_name__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
__magic_name__ = processor.char_decode(UpperCamelCase_ )
__magic_name__ = tokenizer.batch_decode(UpperCamelCase_ )
__magic_name__ = [seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
__magic_name__ = self.get_image_processor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = MgpstrProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__magic_name__ = None
__magic_name__ = self.prepare_image_inputs()
__magic_name__ = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def lowerCAmelCase__ ( self ):
__magic_name__ = self.get_image_processor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = MgpstrProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__magic_name__ = torch.randn(1 , 27 , 38 )
__magic_name__ = torch.randn(1 , 27 , 5_0257 )
__magic_name__ = torch.randn(1 , 27 , 3_0522 )
__magic_name__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
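

# ---------------------------------------------------------------------------
# Hedged end-to-end sketch (an addition, not part of the test file above): how
# the processor under test is typically driven for scene-text recognition. The
# checkpoint id follows the published MGP-STR release and "word.png" is a
# placeholder image path; left commented out because it would download weights
# at import time.
#
# from transformers import MgpstrForSceneTextRecognition
#
# processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
# model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
# pixel_values = processor(images=Image.open("word.png").convert("RGB"), return_tensors="pt").pixel_values
# outputs = model(pixel_values)
# text = processor.batch_decode(outputs.logits)["generated_text"]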
| 190
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json",
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
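

# Hedged usage sketch (an addition, not part of the original module): building
# a tiny randomly initialized FNet from this config. The sizes below are
# arbitrary, and the FNetModel import assumes the usual transformers layout.
if __name__ == "__main__":
    from transformers import FNetModel

    tiny_config = FNetConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2, intermediate_size=128)
    print(FNetModel(tiny_config).config)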
| 190
| 1
|
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a list of lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
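

# ---------------------------------------------------------------------------
# Hedged usage sketch (an addition, not part of the original tests): the
# extractor pads/truncates any 16 kHz waveform to 1024 frames of 128 mel bins,
# which is exactly the (1, 1024, 128) shape asserted in test_integration above.
if __name__ == "__main__":
    fe = ASTFeatureExtractor()
    waveform = np.random.randn(16000).astype(np.float32)  # one second of synthetic audio
    features = fe(waveform, sampling_rate=16000, return_tensors="np")
    print(features.input_values.shape)  # (1, 1024, 128)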
| 600
|
'''simple docstring'''
def nand_gate(input_1: int, input_2: int) -> int:
    """Output is 0 only when both inputs are 1; otherwise 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
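

# NAND is functionally complete: every other basic gate can be built from it.
# A small demonstration (an addition to the original module):
def not_gate_via_nand(a: int) -> int:
    return nand_gate(a, a)


def and_gate_via_nand(a: int, b: int) -> int:
    return nand_gate(nand_gate(a, b), nand_gate(a, b))


def or_gate_via_nand(a: int, b: int) -> int:
    return nand_gate(nand_gate(a, a), nand_gate(b, b))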
| 195
| 0
|
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
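

# Hedged round-trip sketch (an addition): decode() recovers the *prepared*
# plaintext -- uppercased, with 'X' inserted between doubled letters -- not
# the original spacing. Expected strings follow the classic Wikipedia worked
# example for the key "playfair example".
if __name__ == "__main__":
    key = "playfair example"
    ciphertext = encode("hide the gold in the tree stump", key)
    print(ciphertext)  # expected: BMODZBXDNABEKUDMUIXMMOUVIF
    print(decode(ciphertext, key))  # expected: HIDETHEGOLDINTHETREXESTUMP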
| 715
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :] = tensor[:sequence_length]

    return out_tensor.tolist()


def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """Data collator that dynamically pads the inputs received, as well as the labels."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # conversion to tensors would fail here if we still have ragged labels
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
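

# Hedged mini-check (an addition): padding_tensor right-pads ragged rows to a
# fixed length with the given padding value.
if __name__ == "__main__":
    print(padding_tensor([[1, 2], [3, 4, 5]], -1, "right", 4))
    # [[1, 2, -1, -1], [3, 4, 5, -1]]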
| 622
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
__A : Optional[int] = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
        expected_words = __A  # capture the word list before __A is reused for the box list below
__A : int = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 
788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
        expected_boxes = __A

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
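

# ---------------------------------------------------------------------------
# Hedged usage sketch (an addition, not part of the test suite): apply_ocr is
# the main knob in practice. "document.png" is a placeholder path, and
# apply_ocr=True additionally requires pytesseract to be installed.
if __name__ == "__main__":
    image_processor = LayoutLMv3ImageProcessor(apply_ocr=True)
    document = Image.open("document.png").convert("RGB")
    encoding = image_processor(document, return_tensors="pt")
    print(encoding.pixel_values.shape)  # (1, 3, 224, 224)
    print(encoding.words[0][:5], encoding.boxes[0][:5])  # OCR'd words plus 0-1000 normalized boxes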
| 17
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
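
# ---------------------------------------------------------------------------
# Hedged sketch (an addition) of the idea behind _LazyModule above: attribute
# access triggers the real submodule import on first use, so importing the
# package stays cheap. This is a simplified stand-in, not the actual
# transformers implementation, and is left commented out because code in an
# __init__.py after the sys.modules swap would be misleading to define.
#
# import importlib
# from types import ModuleType
#
# class MiniLazyModule(ModuleType):
#     def __init__(self, name, import_structure):
#         super().__init__(name)
#         self._attr_to_module = {a: m for m, attrs in import_structure.items() for a in attrs}
#
#     def __getattr__(self, attr):
#         module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#         value = getattr(module, attr)
#         setattr(self, attr, value)  # cache so the next lookup is a plain attribute hit
#         return value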
| 17
| 1
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
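

# ---------------------------------------------------------------------------
# Hedged usage sketch (an addition): the minimal way to drive the pipeline the
# tests above exercise. It downloads the BAAI/AltDiffusion weights and needs a
# CUDA device, so it is left commented out.
#
# pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", torch_dtype=torch.float16)
# pipe = pipe.to("cuda")
# image = pipe("A painting of a squirrel eating a burger", num_inference_steps=20).images[0]
# image.save("squirrel.png")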
| 370
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 370
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
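

# ---------------------------------------------------------------------------
# Hedged sketch (an addition) of the "Inception-style" crop that crop_pct
# controls: the shortest edge is first resized to int(crop_size / crop_pct),
# then a center crop of crop_size is taken (30 / 0.9 -> 33 px for the defaults
# in this tester). This mirrors, in plain PIL, what PoolFormerImageProcessor
# does internally -- treat it as an illustration, not the reference code.
def resize_then_center_crop(img, crop_size=30, crop_pct=0.9):
    scale = int(crop_size / crop_pct)
    w, h = img.size
    if w < h:
        img = img.resize((scale, int(h * scale / w)))
    else:
        img = img.resize((int(w * scale / h), scale))
    w, h = img.size
    left, top = (w - crop_size) // 2, (h - crop_size) // 2
    return img.crop((left, top, left + crop_size, top + crop_size))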
| 178
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
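
# ---------------------------------------------------------------------------
# Hedged self-check (an addition): a few representative keys run through the
# renaming rules above. The key strings are illustrative timm-style names,
# cheap enough to verify at import time.
assert rename_key("stem.conv.weight") == "bit.embedder.convolution.weight"
assert rename_key("stages.0.blocks.0.conv1.weight") == "bit.encoder.stages.0.layers.0.conv1.weight"
assert rename_key("head.fc.weight") == "classifier.1.weight"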
| 278
| 0
|
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCamelCase (lowerCamelCase ):
def __lowerCamelCase ( self ):
__snake_case = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'embed_dim' ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'num_heads' ) )
class _lowerCamelCase :
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=[16, 48, 96] , SCREAMING_SNAKE_CASE_=[1, 3, 6] , SCREAMING_SNAKE_CASE_=[1, 2, 10] , SCREAMING_SNAKE_CASE_=[7, 3, 3] , SCREAMING_SNAKE_CASE_=[4, 2, 2] , SCREAMING_SNAKE_CASE_=[2, 1, 1] , SCREAMING_SNAKE_CASE_=[2, 2, 2] , SCREAMING_SNAKE_CASE_=[False, False, True] , SCREAMING_SNAKE_CASE_=[0.0, 0.0, 0.0] , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=2 , ):
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_sizes
__snake_case = patch_stride
__snake_case = patch_padding
__snake_case = is_training
__snake_case = use_labels
__snake_case = num_labels
__snake_case = num_channels
__snake_case = embed_dim
__snake_case = num_heads
__snake_case = stride_kv
__snake_case = depth
__snake_case = cls_token
__snake_case = attention_drop_rate
__snake_case = initializer_range
__snake_case = layer_norm_eps
def __lowerCamelCase ( self ):
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__snake_case = CvtModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__snake_case = model(SCREAMING_SNAKE_CASE_ )
__snake_case = (self.image_size, self.image_size)
__snake_case , __snake_case = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__snake_case = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__snake_case = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
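# Sanity check for the expected shape above (a sketch using this tester's
# defaults): the spatial size follows the convolution formula
# out = floor((in + 2 * padding - kernel) / stride) + 1, so with
# image_size=64, patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2] and
# patch_padding=[2, 1, 1] the three stages give
# floor((64 + 4 - 7) / 4) + 1 = 16, floor((16 + 2 - 3) / 2) + 1 = 8 and
# floor((8 + 2 - 3) / 2) + 1 = 4, i.e. a final 4x4 feature map.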
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__snake_case = self.num_labels
__snake_case = CvtForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__snake_case = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ):
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCamelCase (lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
lowercase__ = (
{"""feature-extraction""": CvtModel, """image-classification""": CvtForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __lowerCamelCase ( self ):
__snake_case = CvtModelTester(self )
__snake_case = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def __lowerCamelCase ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
return
@unittest.skip(reason='Cvt does not output attentions' )
def __lowerCamelCase ( self ):
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def __lowerCamelCase ( self ):
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(SCREAMING_SNAKE_CASE_ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def __lowerCamelCase ( self ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def __lowerCamelCase ( self ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__snake_case = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
__snake_case = outputs.hidden_states
__snake_case = len(self.model_tester.depth )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __lowerCamelCase ( self ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowerCamelCase ( self ):
pass
@slow
def __lowerCamelCase ( self ):
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = CvtModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def __lowercase( ) -> Union[str, Any]:
__snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _lowerCamelCase (unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __lowerCamelCase ( self ):
__snake_case = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(SCREAMING_SNAKE_CASE_ )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
__snake_case = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
__snake_case = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
__snake_case = torch.tensor([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
| 345
|
lowerCamelCase_ : List[str] = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
lowerCamelCase_ : List[str] = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def __lowercase( __snake_case : float ,__snake_case : str ,__snake_case : str ) -> float:
__snake_case = from_type.lower().strip('s' )
__snake_case = to_type.lower().strip('s' )
__snake_case = UNIT_SYMBOL.get(__snake_case ,__snake_case )
__snake_case = UNIT_SYMBOL.get(__snake_case ,__snake_case )
if from_sanitized not in METRIC_CONVERSION:
__snake_case = (
f'''Invalid \'from_type\' value: {from_type!r}.\n'''
f'''Conversion abbreviations are: {', '.join(METRIC_CONVERSION )}'''
)
raise ValueError(__snake_case )
if to_sanitized not in METRIC_CONVERSION:
__snake_case = (
f'''Invalid \'to_type\' value: {to_type!r}.\n'''
f'''Conversion abbreviations are: {', '.join(METRIC_CONVERSION )}'''
)
raise ValueError(__snake_case )
__snake_case = METRIC_CONVERSION[from_sanitized]
__snake_case = METRIC_CONVERSION[to_sanitized]
__snake_case = 1
if from_exponent > to_exponent:
__snake_case = from_exponent - to_exponent
else:
__snake_case = -(to_exponent - from_exponent)
return value * pow(10 ,__snake_case )
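# Quick worked example (a sketch; the mangled name ``__lowercase`` stands in
# for the original length-conversion helper): converting 4 kilometers to
# meters uses exponents 3 (km) and 0 (m), so the factor is
# 10 ** (3 - 0) = 1000 and the result is 4 * 1000 = 4000.0; the reverse,
# 4 m -> km, scales by 10 ** (0 - 3) and yields 0.004.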
if __name__ == "__main__":
from doctest import testmod
testmod()
| 345
| 1
|
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
UpperCamelCase = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=lowerCamelCase_ , cache_dir=lowerCamelCase_)
UpperCamelCase = [t[-1] for t in os.walk(os.path.join(lowerCamelCase_ , os.listdir(lowerCamelCase_)[0] , '''snapshots'''))]
UpperCamelCase = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''') for f in files)
@slow
@require_flax
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=lowerCamelCase_)
UpperCamelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCamelCase = jax.random.PRNGKey(0)
UpperCamelCase = 4
UpperCamelCase = jax.device_count()
UpperCamelCase = num_samples * [prompt]
UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_)
# shard inputs and rng
UpperCamelCase = replicate(lowerCamelCase_)
UpperCamelCase = jax.random.split(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = shard(lowerCamelCase_)
UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32).sum() - 4.151_4745) < 1e-3
assert np.abs(np.abs(lowerCamelCase_ , dtype=np.float32).sum() - 4_9947.875) < 5e-1
UpperCamelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
assert len(lowerCamelCase_) == num_samples
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=lowerCamelCase_)
UpperCamelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCamelCase = jax.random.PRNGKey(0)
UpperCamelCase = 5_0
UpperCamelCase = jax.device_count()
UpperCamelCase = num_samples * [prompt]
UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_)
# shard inputs and rng
UpperCamelCase = replicate(lowerCamelCase_)
UpperCamelCase = jax.random.split(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = shard(lowerCamelCase_)
UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32).sum() - 0.0565_2401)) < 1e-3
assert np.abs((np.abs(lowerCamelCase_ , dtype=np.float32).sum() - 238_3808.2)) < 5e-1
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloat16 , safety_checker=lowerCamelCase_)
UpperCamelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCamelCase = jax.random.PRNGKey(0)
UpperCamelCase = 5_0
UpperCamelCase = jax.device_count()
UpperCamelCase = num_samples * [prompt]
UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_)
# shard inputs and rng
UpperCamelCase = replicate(lowerCamelCase_)
UpperCamelCase = jax.random.split(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = shard(lowerCamelCase_)
UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32).sum() - 0.0400_3906)) < 1e-3
assert np.abs((np.abs(lowerCamelCase_ , dtype=np.float32).sum() - 237_3516.75)) < 5e-1
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloat16)
UpperCamelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCamelCase = jax.random.PRNGKey(0)
UpperCamelCase = 5_0
UpperCamelCase = jax.device_count()
UpperCamelCase = num_samples * [prompt]
UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_)
# shard inputs and rng
UpperCamelCase = replicate(lowerCamelCase_)
UpperCamelCase = jax.random.split(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = shard(lowerCamelCase_)
UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32).sum() - 0.0400_3906)) < 1e-3
assert np.abs((np.abs(lowerCamelCase_ , dtype=np.float32).sum() - 237_3516.75)) < 5e-1
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = FlaxDDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , set_alpha_to_one=lowerCamelCase_ , steps_offset=1 , )
UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloat16 , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , )
UpperCamelCase = scheduler.create_state()
UpperCamelCase = scheduler_state
UpperCamelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCamelCase = jax.random.PRNGKey(0)
UpperCamelCase = 5_0
UpperCamelCase = jax.device_count()
UpperCamelCase = num_samples * [prompt]
UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_)
# shard inputs and rng
UpperCamelCase = replicate(lowerCamelCase_)
UpperCamelCase = jax.random.split(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase = shard(lowerCamelCase_)
UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32).sum() - 0.0_4504_3945)) < 1e-3
assert np.abs((np.abs(lowerCamelCase_ , dtype=np.float32).sum() - 234_7693.5)) < 5e-1
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCamelCase = jax.device_count()
UpperCamelCase = num_samples * [prompt]
UpperCamelCase = jax.random.split(jax.random.PRNGKey(0) , lowerCamelCase_)
UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloat16 , safety_checker=lowerCamelCase_ , )
UpperCamelCase = replicate(lowerCamelCase_)
UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_)
UpperCamelCase = shard(lowerCamelCase_)
UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
UpperCamelCase = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloat16 , safety_checker=lowerCamelCase_ , use_memory_efficient_attention=lowerCamelCase_ , )
UpperCamelCase = replicate(lowerCamelCase_)
UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_)
UpperCamelCase = shard(lowerCamelCase_)
UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
UpperCamelCase = images_eff[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice).max() < 1e-2
| 34
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
_A : Union[str, Any] = TransfoXLTokenizer
_A : Union[str, Any] = False
_A : Tuple = False
def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
super().setUp()
__lowercase : List[str] = [
"""<unk>""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""unwanted""",
"""wa""",
"""un""",
"""running""",
""",""",
"""low""",
"""l""",
]
__lowercase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def lowerCAmelCase ( self : Union[str, Any] , **__a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Any = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : Any , __a : int ) -> Tuple:
"""simple docstring"""
__lowercase : Tuple = """<unk> UNwanted , running"""
__lowercase : Dict = """<unk> unwanted, running"""
return input_text, output_text
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : Optional[int] = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=__a )
__lowercase : Any = tokenizer.tokenize("""<unk> UNwanted , running""" )
self.assertListEqual(__a , ["""<unk>""", """unwanted""", """,""", """running"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [0, 4, 8, 7] )
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
__lowercase : Tuple = TransfoXLTokenizer(lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase : List[Any] = TransfoXLTokenizer(lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
__lowercase : Tuple = TransfoXLTokenizer(lower_case=__a )
__lowercase : List[str] = """Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"""
__lowercase : Tuple = [
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""'s""",
"""up""",
"""!""",
"""?""",
]
self.assertListEqual(tokenizer.tokenize(__a ) , __a )
self.assertEqual(tokenizer.convert_tokens_to_string(__a ) , __a )
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : str = self.get_tokenizer()
__lowercase : Union[str, Any] = len(__a )
tokenizer.add_tokens(["""new1""", """new2"""] )
tokenizer.move_added_token("""new1""" , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(__a ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("""new1""" ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , """new1""" )
| 149
| 0
|
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_lowerCamelCase : List[Any] = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def _lowerCAmelCase ( __magic_name__ :Optional[Any] ):
UpperCAmelCase_ = ["""layers""", """blocks"""]
for k in ignore_keys:
state_dict.pop(__UpperCamelCase , __UpperCamelCase )
_lowerCamelCase : Dict = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def _lowerCAmelCase ( __magic_name__ :Optional[Any] ):
UpperCAmelCase_ = list(s_dict.keys() )
for key in keys:
UpperCAmelCase_ = key
for k, v in WHISPER_MAPPING.items():
if k in key:
UpperCAmelCase_ = new_key.replace(__UpperCamelCase , __UpperCamelCase )
print(F'''{key} -> {new_key}''' )
UpperCAmelCase_ = s_dict.pop(__UpperCamelCase )
return s_dict
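# Example of the substitution above (a sketch): the OpenAI key
# "decoder.blocks.0.attn.query.weight" matches "blocks" and ".attn.query" in
# WHISPER_MAPPING and is rewritten to
# "decoder.layers.0.self_attn.q_proj.weight".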
def _lowerCAmelCase ( __magic_name__ :Optional[Any] ):
UpperCAmelCase_ = emb.weight.shape
UpperCAmelCase_ = nn.Linear(__UpperCamelCase , __UpperCamelCase , bias=__UpperCamelCase )
UpperCAmelCase_ = emb.weight.data
return lin_layer
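# Tying note (a sketch): ``emb.weight`` has shape (vocab_size, d_model), and
# reusing its data in a bias-free linear layer makes the output projection
# compute hidden_states @ emb.weight.T, i.e. the LM head shares parameters
# with the token embedding table.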
def _lowerCAmelCase ( __magic_name__ :str , __magic_name__ :str ):
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
UpperCAmelCase_ = os.path.basename(__UpperCamelCase )
UpperCAmelCase_ = url.split('''/''' )[-2]
UpperCAmelCase_ = os.path.join(__UpperCamelCase , __UpperCamelCase )
if os.path.exists(__UpperCamelCase ) and not os.path.isfile(__UpperCamelCase ):
raise RuntimeError(F'''{download_target} exists and is not a regular file''' )
if os.path.isfile(__UpperCamelCase ):
UpperCAmelCase_ = open(__UpperCamelCase , '''rb''' ).read()
if hashlib.sha256(__UpperCamelCase ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(F'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )
with urllib.request.urlopen(__UpperCamelCase ) as source, open(__UpperCamelCase , '''wb''' ) as output:
with tqdm(
total=int(source.info().get('''Content-Length''' ) ) , ncols=8_0 , unit='''iB''' , unit_scale=__UpperCamelCase , unit_divisor=1_0_2_4 ) as loop:
while True:
UpperCAmelCase_ = source.read(8_1_9_2 )
if not buffer:
break
output.write(__UpperCamelCase )
loop.update(len(__UpperCamelCase ) )
UpperCAmelCase_ = open(__UpperCamelCase , '''rb''' ).read()
if hashlib.sha256(__UpperCamelCase ).hexdigest() != expected_shaaaa:
raise RuntimeError(
'''Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.''' )
return model_bytes
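# The expected digest is not a separate argument: OpenAI embeds the SHA256
# checksum as the second-to-last path segment of each model URL
# (url.split('/')[-2]), so a cached file is reused only when
# hashlib.sha256(file_bytes).hexdigest() equals that segment.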
def _lowerCAmelCase ( __magic_name__ :List[Any] , __magic_name__ :str ):
if ".pt" not in checkpoint_path:
UpperCAmelCase_ = _download(_MODELS[checkpoint_path] )
else:
UpperCAmelCase_ = torch.load(__UpperCamelCase , map_location='''cpu''' )
UpperCAmelCase_ = original_checkpoint["""dims"""]
UpperCAmelCase_ = original_checkpoint["""model_state_dict"""]
UpperCAmelCase_ = state_dict["""decoder.token_embedding.weight"""]
remove_ignore_keys_(__UpperCamelCase )
rename_keys(__UpperCamelCase )
UpperCAmelCase_ = True
UpperCAmelCase_ = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
UpperCAmelCase_ = WhisperConfig(
vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=__UpperCamelCase , decoder_ffn_dim=__UpperCamelCase , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_head'''] , max_source_positions=dimensions['''n_audio_ctx'''] , )
UpperCAmelCase_ = WhisperForConditionalGeneration(__UpperCamelCase )
UpperCAmelCase_ = model.model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
if len(__UpperCamelCase ) > 0 and not set(__UpperCamelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
F''' but all the following weights are missing {missing}''' )
if tie_embeds:
UpperCAmelCase_ = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
UpperCAmelCase_ = proj_out_weights
model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
_lowerCamelCase : List[str] = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
_lowerCamelCase : List[str] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 721
|
from __future__ import annotations
_lowerCamelCase : Dict = 1.6_0_2_1E-1_9 # units = C
def _lowerCAmelCase ( __magic_name__ :float , __magic_name__ :float , __magic_name__ :float , ):
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''Mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
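# Worked example (a sketch): with sigma = n * e * mu, passing
# conductivity=0, electron_conc=1e20 and mobility=0.01 returns
# ("conductivity", 0.01 * 1e20 * 1.6021e-19) == ("conductivity", 0.16021).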
if __name__ == "__main__":
import doctest
doctest.testmod()
| 407
| 0
|
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowerCamelCase__ ( _A ):
'''simple docstring'''
monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def lowerCamelCase__ ( _A ):
'''simple docstring'''
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : Any , __lowercase : Dict ):
"""simple docstring"""
snake_case_ = metric_id
class UpperCAmelCase :
'''simple docstring'''
lowerCAmelCase_ = [MetricMock(_lowercase ) for metric_id in ['''accuracy''', '''mse''', '''precision''', '''codeparrot/apps_metric''']]
def snake_case__ ( self : str ):
"""simple docstring"""
return self._metrics
monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
"func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def lowerCamelCase__ ( _A , _A , _A , _A , _A ):
'''simple docstring'''
if "tmp_path" in args:
snake_case_ = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
with pytest.warns(a__ , match="https://huggingface.co/docs/evaluate" ):
func(*a__ )
| 376
|
class lowerCamelCase_ :
def __init__( self : Dict , __A : Tuple , __A : Optional[int] , __A : int ):
__A : List[str] = name
__A : Optional[int] = value
__A : Optional[Any] = weight
def __repr__( self : Any ):
return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def lowerCAmelCase_ ( self : Union[str, Any] ):
return self.value
def lowerCAmelCase_ ( self : str ):
return self.name
def lowerCAmelCase_ ( self : str ):
return self.weight
def lowerCAmelCase_ ( self : Dict ):
return self.value / self.weight
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : Optional[int] ,a__ : Union[str, Any] ) -> int:
__A : Tuple = []
for i in range(len(a__ ) ):
menu.append(Things(name[i] ,value[i] ,weight[i] ) )
return menu
def __SCREAMING_SNAKE_CASE ( a__ : Tuple ,a__ : Any ,a__ : Optional[int] ) -> Tuple:
__A : Optional[int] = sorted(a__ ,key=a__ ,reverse=a__ )
__A : Optional[Any] = []
__A , __A : Tuple = 0.0, 0.0
for i in range(len(a__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
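# Worked example (a sketch; the mangled helpers correspond to a build_menu
# constructor and a greedy selector): for values [10, 5], weights [4, 4] and
# max_cost 4, the items are sorted by value/weight (2.5 vs 1.25), the first
# item fits (cost 4 <= 4), the second would exceed the budget, and the result
# is ([<first item>], 10.0).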
def __SCREAMING_SNAKE_CASE ( ) -> List[Any]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17
| 0
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
UpperCAmelCase : Optional[int] = ViTImageProcessor if is_vision_available() else None
@property
def UpperCAmelCase__ (self : Optional[int] ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ (self : str ) -> int:
lowercase = (3, 3_2, 1_2_8)
lowercase = tempfile.mkdtemp()
# fmt: off
lowercase = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
lowercase = dict(zip(A__ , range(len(A__ ) ) ) )
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(A__ ) + "\n" )
lowercase = {
"do_normalize": False,
"do_resize": True,
"image_processor_type": "ViTImageProcessor",
"resample": 3,
"size": {"height": 3_2, "width": 1_2_8},
}
lowercase = os.path.join(self.tmpdirname , A__ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(A__ , A__ )
def UpperCAmelCase__ (self : Dict , **A__ : List[Any] ) -> Union[str, Any]:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **A__ )
def UpperCAmelCase__ (self : Any , **A__ : Tuple ) -> Tuple:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **A__ )
def UpperCAmelCase__ (self : Optional[int] ) -> int:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ (self : str ) -> List[str]:
lowercase = np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )
lowercase = Image.fromarray(np.moveaxis(A__ , 0 , -1 ) )
return image_input
def UpperCAmelCase__ (self : str ) -> List[Any]:
lowercase = self.get_tokenizer()
lowercase = self.get_image_processor()
lowercase = MgpstrProcessor(tokenizer=A__ , image_processor=A__ )
processor.save_pretrained(self.tmpdirname )
lowercase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=A__ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , A__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , A__ )
def UpperCAmelCase__ (self : Any ) -> Optional[int]:
lowercase = self.get_tokenizer()
lowercase = self.get_image_processor()
lowercase = MgpstrProcessor(tokenizer=A__ , image_processor=A__ )
processor.save_pretrained(self.tmpdirname )
lowercase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase = self.get_image_processor(do_normalize=A__ , padding_value=1.0 )
lowercase = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=A__ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , A__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A__ )
def UpperCAmelCase__ (self : List[Any] ) -> List[Any]:
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = MgpstrProcessor(tokenizer=A__ , image_processor=A__ )
lowercase = self.prepare_image_inputs()
lowercase = image_processor(A__ , return_tensors="np" )
lowercase = processor(images=A__ , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase__ (self : List[Any] ) -> Union[str, Any]:
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = MgpstrProcessor(tokenizer=A__ , image_processor=A__ )
lowercase = "test"
lowercase = processor(text=A__ )
lowercase = tokenizer(A__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ (self : int ) -> Optional[int]:
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = MgpstrProcessor(tokenizer=A__ , image_processor=A__ )
lowercase = "test"
lowercase = self.prepare_image_inputs()
lowercase = processor(text=A__ , images=A__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
# test if it raises when no input is passed
with pytest.raises(A__ ):
processor()
def UpperCAmelCase__ (self : List[Any] ) -> Union[str, Any]:
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = MgpstrProcessor(tokenizer=A__ , image_processor=A__ )
lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowercase = processor.char_decode(A__ )
lowercase = tokenizer.batch_decode(A__ )
lowercase = [seq.replace(" " , "" ) for seq in decoded_tok]
self.assertListEqual(A__ , A__ )
def UpperCAmelCase__ (self : Any ) -> Optional[int]:
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = MgpstrProcessor(tokenizer=A__ , image_processor=A__ )
lowercase = None
lowercase = self.prepare_image_inputs()
lowercase = processor(text=A__ , images=A__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCAmelCase__ (self : int ) -> Optional[Any]:
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = MgpstrProcessor(tokenizer=A__ , image_processor=A__ )
lowercase = torch.randn(1 , 2_7 , 3_8 )
lowercase = torch.randn(1 , 2_7 , 5_0_2_5_7 )
lowercase = torch.randn(1 , 2_7 , 3_0_5_2_2 )
lowercase = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
| 459
|
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : List[Any] = "https://openaipublic.azureedge.net/jukebox/models/"
__lowerCamelCase : Tuple = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
lowercase = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
lowercase = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
lowercase = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
lowercase = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
lowercase = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
lowercase = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
lowercase = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
lowercase = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
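# Example of the standalone renames above (a sketch): a VQ-VAE codebook key
# such as "bottleneck.level_blocks.0.k" ends with "k" and becomes
# "bottleneck.level_blocks.0.codebook", while a "y_emb." segment is rewritten
# to "metadata_embedding.".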
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = {}
import re
lowercase = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
lowercase = re.compile(
R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
lowercase = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
lowercase = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
lowercase = re.compile(
R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
lowercase = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
lowercase = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
lowercase = re.compile(
R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
lowercase = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(lowerCAmelCase_ ):
lowercase = re_encoder_block_conv_in.match(lowerCAmelCase_ )
lowercase = regex_match.groups()
lowercase = int(groups[2] ) * 2 + int(groups[3] )
lowercase = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
lowercase = re_encoder_block_conv_in.sub(lowerCAmelCase_ , lowerCAmelCase_ )
elif re_encoder_block_resnet.fullmatch(lowerCAmelCase_ ):
lowercase = re_encoder_block_resnet.match(lowerCAmelCase_ )
lowercase = regex_match.groups()
lowercase = int(groups[2] ) * 2 + int(groups[3] )
lowercase = {"1": 1, "3": 2}[groups[-2]]
lowercase = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
lowercase = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
lowercase = prefix + resnet_block
lowercase = re_encoder_block_resnet.sub(lowerCAmelCase_ , lowerCAmelCase_ )
elif re_encoder_block_proj_out.fullmatch(lowerCAmelCase_ ):
lowercase = re_encoder_block_proj_out.match(lowerCAmelCase_ )
lowercase = regex_match.groups()
lowercase = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
lowercase = re_encoder_block_proj_out.sub(lowerCAmelCase_ , lowerCAmelCase_ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(lowerCAmelCase_ ):
lowercase = re_decoder_block_conv_out.match(lowerCAmelCase_ )
lowercase = regex_match.groups()
lowercase = int(groups[2] ) * 2 + int(groups[3] ) - 2
lowercase = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
lowercase = re_decoder_block_conv_out.sub(lowerCAmelCase_ , lowerCAmelCase_ )
elif re_decoder_block_resnet.fullmatch(lowerCAmelCase_ ):
lowercase = re_decoder_block_resnet.match(lowerCAmelCase_ )
lowercase = regex_match.groups()
lowercase = int(groups[2] ) * 2 + int(groups[3] ) - 2
lowercase = {"1": 1, "3": 2}[groups[-2]]
lowercase = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
lowercase = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
lowercase = prefix + resnet_block
lowercase = re_decoder_block_resnet.sub(lowerCAmelCase_ , lowerCAmelCase_ )
elif re_decoder_block_proj_in.fullmatch(lowerCAmelCase_ ):
lowercase = re_decoder_block_proj_in.match(lowerCAmelCase_ )
lowercase = regex_match.groups()
lowercase = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
lowercase = re_decoder_block_proj_in.sub(lowerCAmelCase_ , lowerCAmelCase_ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(lowerCAmelCase_ ):
lowercase = re_prior_cond_conv_out.match(lowerCAmelCase_ )
lowercase = regex_match.groups()
lowercase = int(groups[1] ) * 2 + int(groups[2] ) - 2
lowercase = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
lowercase = re_prior_cond_conv_out.sub(lowerCAmelCase_ , lowerCAmelCase_ )
elif re_prior_cond_resnet.fullmatch(lowerCAmelCase_ ):
lowercase = re_prior_cond_resnet.match(lowerCAmelCase_ )
lowercase = regex_match.groups()
lowercase = int(groups[1] ) * 2 + int(groups[2] ) - 2
lowercase = {"1": 1, "3": 2}[groups[-2]]
lowercase = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
lowercase = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
lowercase = prefix + resnet_block
lowercase = re_prior_cond_resnet.sub(lowerCAmelCase_ , lowerCAmelCase_ )
elif re_prior_cond_proj_in.fullmatch(lowerCAmelCase_ ):
lowercase = re_prior_cond_proj_in.match(lowerCAmelCase_ )
lowercase = regex_match.groups()
lowercase = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
lowercase = re_prior_cond_proj_in.sub(lowerCAmelCase_ , lowerCAmelCase_ )
# keep original key
else:
lowercase = original_key
lowercase = replace_key(lowerCAmelCase_ )
if f'{key_prefix}.{key}' not in model_state_dict or key is None:
print(f'failed converting {original_key} to {key}, does not match' )
# handle missmatched shape
elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
lowercase = model_state_dict[f'{key_prefix}.{key}']
print(f'{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match' )
lowercase = original_key
lowercase = original_key
lowercase = value
return new_dict
@torch.no_grad()
def UpperCAmelCase_ ( lowerCAmelCase_=None , lowerCAmelCase_=None ):
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
lowercase = requests.get(f'{PREFIX}{file}' , allow_redirects=lowerCAmelCase_ )
os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=lowerCAmelCase_ )
open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , "wb" ).write(r.content )
lowercase = MODEL_MAPPING[model_name.split("/" )[-1]]
lowercase = JukeboxConfig.from_pretrained(lowerCAmelCase_ )
lowercase = JukeboxModel(lowerCAmelCase_ )
lowercase = []
lowercase = {}
for i, dict_name in enumerate(lowerCAmelCase_ ):
lowercase = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )["model"]
lowercase = {}
for k in old_dic.keys():
if k.endswith(".b" ):
lowercase = old_dic[k]
elif k.endswith(".w" ):
lowercase = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
lowercase = old_dic[k]
else:
lowercase = old_dic[k]
lowercase = "vqvae" if i == 0 else f'priors.{3 - i}'
lowercase = fix_jukebox_keys(lowerCAmelCase_ , model.state_dict() , lowerCAmelCase_ , lowerCAmelCase_ )
weight_dict.append(lowerCAmelCase_ )
lowercase = weight_dict.pop(0 )
model.vqvae.load_state_dict(lowerCAmelCase_ )
for i in range(len(lowerCAmelCase_ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
with open(f'{pytorch_dump_folder_path}/mapping.json' , "w" ) as txtfile:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase_ )
return weight_dict
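# Note on ordering (a sketch): checkpoints are converted bottom-up (vqvae,
# then prior levels 0..2) while ``model.priors`` is stored top-down, which is
# why the load loop indexes ``weight_dict[2 - i]`` after popping the vqvae
# weights off the front of the list.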
if __name__ == "__main__":
__lowerCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
__lowerCamelCase : List[str] = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 459
| 1
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
A_ : List[Any] ="""\
"""
A_ : Optional[Any] ="""
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
A_ : Any ="""
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION)
class lowercase_ ( datasets.Metric):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , )
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 16 , _UpperCAmelCase = True , _UpperCAmelCase=None ):
"""simple docstring"""
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
a_ = """cuda"""
else:
a_ = """cuda""" if torch.cuda.is_available() else """cpu"""
a_ = AutoModelForCausalLM.from_pretrained(_UpperCAmelCase )
a_ = model.to(_UpperCAmelCase )
a_ = AutoTokenizer.from_pretrained(_UpperCAmelCase )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
a_ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_UpperCAmelCase ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
a_ = model.config.max_length - 1
else:
a_ = model.config.max_length
a_ = tokenizer(
_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors="""pt""" , return_attention_mask=_UpperCAmelCase , ).to(_UpperCAmelCase )
a_ = encodings["""input_ids"""]
a_ = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
a_ = []
a_ = CrossEntropyLoss(reduction="""none""" )
for start_index in logging.tqdm(range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase ) ):
a_ = min(start_index + batch_size , len(_UpperCAmelCase ) )
a_ = encoded_texts[start_index:end_index]
a_ = attn_masks[start_index:end_index]
if add_start_token:
a_ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_UpperCAmelCase )
a_ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
a_ = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(_UpperCAmelCase ), attn_mask] , dim=1 )
a_ = encoded_batch
with torch.no_grad():
a_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase ).logits
a_ = out_logits[..., :-1, :].contiguous()
a_ = labels[..., 1:].contiguous()
a_ = attn_mask[..., 1:].contiguous()
a_ = torch.exp(
(loss_fct(shift_logits.transpose(1 , 2 ) , _UpperCAmelCase ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_UpperCAmelCase )}
| 483
|
def lowerCamelCase_ ( UpperCAmelCase__ = 100 ):
"""simple docstring"""
a_ = (n * (n + 1) // 2) ** 2
a_ = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
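# Worked example (a sketch): for n = 10 the square of the sum is
# (10 * 11 // 2) ** 2 = 55 ** 2 = 3025, the sum of squares is
# 10 * 11 * 21 // 6 = 385, and the difference is 3025 - 385 = 2640.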
if __name__ == "__main__":
print(F'''{solution() = }''')
| 483
| 1
|
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def lowercase_ ( ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--src_path''' , type=_lowercase , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
parser.add_argument(
'''--evaluation_set''' , type=_lowercase , help='''where to store parsed evaluation_set file''' , )
parser.add_argument(
'''--gold_data_path''' , type=_lowercase , help='''where to store parsed gold_data_path file''' , )
lowerCamelCase_ : Optional[int] = parser.parse_args()
with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
args.gold_data_path , '''w''' ) as gold_file:
lowerCamelCase_ : Any = json.load(_lowercase )
for dpr_record in tqdm(_lowercase ):
lowerCamelCase_ : Union[str, Any] = dpr_record['''question''']
lowerCamelCase_ : Dict = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
eval_file.write(question + '''\n''' )
gold_file.write('''\t'''.join(_lowercase ) + '''\n''' )
if __name__ == "__main__":
main()
| 357
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__lowercase : List[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] = ['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
__lowercase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 357
| 1
|
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : int = logging.get_logger(__name__)
_A : List[str] = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"
    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
            )
        super().__init__(**kwargs)
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)
    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
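# Worked example for the derived properties above (values follow from the defaults:
# hop length 8*5*4*2 = 320, so frame_rate = ceil(24000 / 320) = 75, and
# num_quantizers = int(1000 * 24.0 // (75 * 10)) = 32):
#   config = EncodecConfig(chunk_length_s=1.0, overlap=0.01)
#   config.chunk_length    # 24000 samples (1 s at 24 kHz)
#   config.frame_rate      # 75
#   config.num_quantizers  # 32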
| 315
|
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    """Wraps a getitem call on key k as an (operation, args...) tuple."""
    return getitem, k
def _set(k, v):
    """Wraps a setitem call on key k with value v as an (operation, args...) tuple."""
    return setitem, k, v
def _del(k):
    """Wraps a delitem call on key k as an (operation, args...) tuple."""
    return delitem, k
def _run_operation(obj, fun, *args):
    """Runs fun(obj, *args), returning (result, None) on success or (None, exception)."""
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)
_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]
_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]
_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]
_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]
_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_matching_dict():
    def is_public(name: str) -> bool:
        return not name.startswith("_")
    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    assert dict_public_names > hash_public_names
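# Minimal manual walk-through of the helpers above (a sketch outside the pytest
# suite): replay one scenario against both HashMap and dict and compare results.
if __name__ == "__main__":
    my, py = HashMap(initial_block_size=4), {}
    for fun, *args in _delete_items:
        assert _run_operation(my, fun, *args)[0] == _run_operation(py, fun, *args)[0]
    print("HashMap matches dict on the delete scenario")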
| 315
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
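# Hedged illustration of the two import paths above: static type checkers resolve
# names through the TYPE_CHECKING branch, while at runtime _LazyModule defers the
# heavy torch/vision imports until first attribute access, e.g.:
#   from transformers import OwlViTProcessor, OwlViTForObjectDetection
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")  # illustrative checkpoint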
| 700
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Returns a mapping from utf-8 bytes to printable unicode strings, avoiding the
    whitespace/control characters the BPE code cannot handle."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Returns the set of adjacent symbol pairs in a word, where the word is a tuple
    of variable-length string symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    """Constructs a Longformer tokenizer, derived from the GPT-2 byte-level BPE tokenizer."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)
    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index)
    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
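# Hedged usage sketch for the byte-level BPE tokenizer above (checkpoint name taken
# from the vocab map in this file; exact ids depend on the downloaded vocab/merges):
#   tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#   ids = tokenizer("Hello world")["input_ids"]  # wrapped in <s> ... </s> by build_inputs_with_special_tokens
#   assert tokenizer.decode(ids, skip_special_tokens=True).strip() == "Hello world"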
| 527
| 0
|
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_CITATION = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )
    def _download_and_prepare(self, dl_manager):
        import nltk
        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            # newer nltk versions expect pre-tokenized inputs
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        return {"meteor": np.mean(scores)}
| 665
|
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
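# Sketch of the docstring pattern the regex above matches: a markdown link whose text
# equals the final path segment of its huggingface.co URL.
#   >>> _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]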
| 75
| 0
|
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self, parent, batch_size=3, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def lowercase__ ( self : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , ):
lowerCamelCase_ = True
lowerCamelCase_ = FalconModel(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCamelCase_ = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , )
lowerCamelCase_ = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , )
lowerCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : int , ):
lowerCamelCase_ = FalconForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , ):
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = FalconForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# first forward pass
lowerCamelCase_ = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , use_cache=__UpperCamelCase , )
lowerCamelCase_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase_ = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCamelCase_ = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["""hidden_states"""][0]
lowerCamelCase_ = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["""hidden_states"""][0]
# select random slice
lowerCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)
def lowercase__ ( self : str ):
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = input_dict["""input_ids"""]
lowerCamelCase_ = input_ids.ne(1 ).to(__UpperCamelCase )
lowerCamelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCamelCase_ = FalconForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self : Any ):
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = """single_label_classification"""
lowerCamelCase_ = input_dict["""input_ids"""]
lowerCamelCase_ = input_ids.ne(1 ).to(__UpperCamelCase )
lowerCamelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCamelCase_ = FalconForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self : str ):
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = input_dict["""input_ids"""]
lowerCamelCase_ = FalconForCausalLM(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCamelCase_ = model(__UpperCamelCase , use_cache=__UpperCamelCase )
lowerCamelCase_ = input_ids.shape[0]
lowerCamelCase_ = model._convert_to_rw_cache(result.past_key_values )
lowerCamelCase_ = model._convert_cache_to_standard_format(__UpperCamelCase , __UpperCamelCase )
for layer in range(len(__UpperCamelCase ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def lowercase__ ( self : Union[str, Any] ):
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = """multi_label_classification"""
lowerCamelCase_ = input_dict["""input_ids"""]
lowerCamelCase_ = input_ids.ne(1 ).to(__UpperCamelCase )
lowerCamelCase_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCamelCase_ = FalconForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self : Dict ):
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(__UpperCamelCase , """use_cache""" ):
return
lowerCamelCase_ = model_class(__UpperCamelCase ).to(__UpperCamelCase )
if "use_cache" not in inputs:
lowerCamelCase_ = True
lowerCamelCase_ = model(**__UpperCamelCase )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
lowerCamelCase_ = (
getattr(__UpperCamelCase , """decoder_layers""" , __UpperCamelCase )
or getattr(__UpperCamelCase , """num_decoder_layers""" , __UpperCamelCase )
or config.num_hidden_layers
)
lowerCamelCase_ = getattr(__UpperCamelCase , """num_kv_heads""" , config.num_attention_heads )
lowerCamelCase_ = getattr(__UpperCamelCase , """d_model""" , config.hidden_size )
lowerCamelCase_ = embed_dim // num_attention_heads
lowerCamelCase_ = outputs["""past_key_values"""]
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
lowerCamelCase_ , lowerCamelCase_ = inputs["""input_ids"""].shape
for i in range(__UpperCamelCase ):
if config.new_decoder_architecture:
lowerCamelCase_ = config.num_attention_heads
elif config.multi_query:
lowerCamelCase_ = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
@slow
def lowercase__ ( self : Optional[int] ):
lowerCamelCase_ = AutoTokenizer.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
lowerCamelCase_ = FalconForCausalLM.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
model.eval()
model.to(__UpperCamelCase )
lowerCamelCase_ = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__UpperCamelCase )
lowerCamelCase_ = (
"""My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."""
)
lowerCamelCase_ = model.generate(**__UpperCamelCase , do_sample=__UpperCamelCase , max_new_tokens=1_9 )
lowerCamelCase_ = tokenizer.batch_decode(__UpperCamelCase )[0]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
@slow
def lowercase__ ( self : Any ):
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
lowerCamelCase_ = AutoTokenizer.from_pretrained(__UpperCamelCase )
lowerCamelCase_ = FalconForCausalLM.from_pretrained(__UpperCamelCase )
model.eval()
model.to(__UpperCamelCase )
lowerCamelCase_ = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__UpperCamelCase )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**__UpperCamelCase , do_sample=__UpperCamelCase , max_new_tokens=4 )
model.generate(**__UpperCamelCase , do_sample=__UpperCamelCase , max_new_tokens=4 )
model.generate(**__UpperCamelCase , num_beams=2 , max_new_tokens=4 )
@slow
def lowercase__ ( self : Optional[Any] ):
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
lowerCamelCase_ = AutoTokenizer.from_pretrained(__UpperCamelCase )
lowerCamelCase_ = FalconForCausalLM.from_pretrained(__UpperCamelCase )
model.eval()
model.to(device=__UpperCamelCase )
lowerCamelCase_ = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__UpperCamelCase )
# Test results are the same with and without cache
lowerCamelCase_ = model.generate(**__UpperCamelCase , do_sample=__UpperCamelCase , max_new_tokens=2_0 , use_cache=__UpperCamelCase )
lowerCamelCase_ = model.generate(**__UpperCamelCase , do_sample=__UpperCamelCase , max_new_tokens=2_0 , use_cache=__UpperCamelCase )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
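# Hedged usage sketch mirroring the slow test above (checkpoint name reused from the
# test; greedy decoding, so the 19-token continuation is deterministic):
#   tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
#   model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
#   inputs = tokenizer("My favorite food is", return_tensors="pt")
#   output = model.generate(**inputs, do_sample=False, max_new_tokens=19)
#   print(tokenizer.batch_decode(output)[0])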
| 103
|
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True,
        hidden_act="relu", num_labels=3, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
def lowercase__ ( self : Union[str, Any] ):
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def lowercase__ ( self : Optional[int] ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def lowercase__ ( self : Tuple ):
super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def lowercase__ ( self : Optional[Any] ):
pass
def lowercase__ ( self : Dict ):
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(__UpperCamelCase )
lowerCamelCase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def lowercase__ ( self : Any ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowercase__ ( self : int ):
def check_hidden_states_output(__UpperCamelCase : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : Any ):
lowerCamelCase_ = model_class(__UpperCamelCase )
lowerCamelCase_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) , training=__UpperCamelCase )
lowerCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase_ = self.model_tester.num_stages
self.assertEqual(len(__UpperCamelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCamelCase_ = layer_type
lowerCamelCase_ = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : Optional[Any] ):
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(__UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : List[Any]={} ):
lowerCamelCase_ = model(__UpperCamelCase , return_dict=__UpperCamelCase , **__UpperCamelCase )
lowerCamelCase_ = model(__UpperCamelCase , return_dict=__UpperCamelCase , **__UpperCamelCase ).to_tuple()
def recursive_check(__UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ):
if isinstance(__UpperCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__UpperCamelCase , __UpperCamelCase ):
recursive_check(__UpperCamelCase , __UpperCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(__UpperCamelCase , __UpperCamelCase ) ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(__UpperCamelCase , __UpperCamelCase )
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(__UpperCamelCase )
lowerCamelCase_ = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
check_equivalence(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
lowerCamelCase_ = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
check_equivalence(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
check_equivalence(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , {"""output_hidden_states""": True} )
lowerCamelCase_ = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
lowerCamelCase_ = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
check_equivalence(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , {"""output_hidden_states""": True} )
def lowercase__ ( self : Union[str, Any] ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def lowercase__ ( self : List[Any] ):
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFRegNetModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
@slow
def lowercase__ ( self : List[str] ):
lowerCamelCase_ = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=__UpperCamelCase , return_tensors="""tf""" )
# forward pass
lowerCamelCase_ = model(**__UpperCamelCase , training=__UpperCamelCase )
# verify the logits
lowerCamelCase_ = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
lowerCamelCase_ = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 )
| 103
| 1
|
"""simple docstring"""
encode_dict = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """Encodes a word (letters and spaces only) with the Baconian cipher."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded
def decode(coded: str) -> str:
    """Decodes a Baconian-cipher string of 'A's, 'B's and spaces back to plaintext."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
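# Round-trip examples for the cipher above (doctest-style sketch; 'h' -> AABBB,
# 'e' -> AABAA, 'l' -> ABABA, 'o' -> ABBAB per the table):
#   >>> encode("hello")
#   'AABBBAABAAABABAABABAABBAB'
#   >>> decode(encode("hello world"))
#   'hello world'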
| 535
|
"""simple docstring"""
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")
METRIC_CONVERSION = {
"""cubicmeter""": from_to(1, 1),
"""litre""": from_to(0.001, 1_0_0_0),
"""kilolitre""": from_to(1, 1),
"""gallon""": from_to(0.00_454, 264.172),
"""cubicyard""": from_to(0.76_455, 1.30_795),
"""cubicfoot""": from_to(0.028, 35.3_147),
"""cup""": from_to(0.000_236_588, 4_226.75),
}
def convert_volume(value: float, from_type: str, to_type: str) -> float:
    """Converts a volume from `from_type` to `to_type` via the METRIC_CONVERSION table."""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
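# Worked examples for the converter above (doctest-style sketch; litre -> cubicmeter
# multiplies by 0.001 * 1, cup -> litre by 0.000236588 * 1000 = 0.236588):
#   >>> convert_volume(1, "litre", "cubicmeter")
#   0.001
#   >>> round(convert_volume(1, "cup", "litre"), 6)
#   0.236588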
| 535
| 1
|
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''AI-Sweden/gpt-sw3-126m''': 2048,
'''AI-Sweden/gpt-sw3-350m''': 2048,
'''AI-Sweden/gpt-sw3-1.6b''': 2048,
'''AI-Sweden/gpt-sw3-6.7b''': 2048,
'''AI-Sweden/gpt-sw3-20b''': 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def __UpperCAmelCase ( self :List[str] ) -> int:
'''simple docstring'''
return len(self.sp_model )
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :str ) -> str:
'''simple docstring'''
_a : Tuple =self.non_printing_characters_re.sub("""""" , SCREAMING_SNAKE_CASE )
# Normalize whitespaces
_a : str ="""""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
_a : List[str] =unicodedata.normalize("""NFC""" , SCREAMING_SNAKE_CASE )
return text
def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :str , **SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]:
'''simple docstring'''
_a : Any =self.preprocess_text(SCREAMING_SNAKE_CASE )
return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :str ) -> int:
'''simple docstring'''
return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :int ) -> str:
'''simple docstring'''
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE )
@staticmethod
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :str ) -> str:
'''simple docstring'''
return out_string
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :List[str] ) -> str:
'''simple docstring'''
_a : List[Any] =[]
_a : Optional[int] =""""""
_a : Any =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE ) + token
_a : Dict =True
_a : Optional[Any] =[]
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE )
_a : Optional[Any] =False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE )
return out_string
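# Note on the TODO above: a space is inserted before each flushed chunk whenever
# a special token follows ordinary tokens, so decode(encode(doc)) can differ from
# doc by an extra space. Illustrative trace (token strings are hypothetical):
#   convert_tokens_to_string(["▁Hej", "<|endoftext|>"]) -> " Hej<|endoftext|>"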
def __UpperCAmelCase ( self :List[Any] ) -> Dict[str, int]:
'''simple docstring'''
_a : Tuple ={self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_a : int =os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE , """wb""" ) as fi:
_a : str =self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, List[str]] , SCREAMING_SNAKE_CASE :Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_a : str =self.preprocess_text(SCREAMING_SNAKE_CASE )
_a : Tuple =self.sp_model.encode(SCREAMING_SNAKE_CASE )
else:
_a : Any =[self.preprocess_text(t ) for t in text]
_a : List[Any] =self.sp_model.encode(SCREAMING_SNAKE_CASE )
if return_tensors is True or return_tensors == "pt":
_a : List[str] =torch.tensor(SCREAMING_SNAKE_CASE )
return token_ids
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Union[int, List[int]] ) -> str:
'''simple docstring'''
return self.sp_model.decode(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :"Conversation" ) -> List[int]:
'''simple docstring'''
_a : Tuple =[f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
_a : List[Any] =(
f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(SCREAMING_SNAKE_CASE ) + f"{self.bos_token}Bot:"
)
return self.encode(text=SCREAMING_SNAKE_CASE )
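# Illustrative prompt layout built above (actual eos/bos strings depend on the
# model variant selected in __init__): for user/bot turns ["Hej", "Hej!"] the
# string passed to encode is
#   "<|endoftext|><s>User: Hej<s>Bot: Hej!<s>Bot:"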
| 506
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self :Any , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Optional[Any]=1_2 , SCREAMING_SNAKE_CASE :List[str]=7 , SCREAMING_SNAKE_CASE :str=True , SCREAMING_SNAKE_CASE :List[Any]=True , SCREAMING_SNAKE_CASE :Union[str, Any]=True , SCREAMING_SNAKE_CASE :List[str]=9_9 , SCREAMING_SNAKE_CASE :Optional[int]=3_2 , SCREAMING_SNAKE_CASE :Union[str, Any]=3_2 , SCREAMING_SNAKE_CASE :Union[str, Any]=2 , SCREAMING_SNAKE_CASE :List[str]=4 , SCREAMING_SNAKE_CASE :List[str]=3_7 , SCREAMING_SNAKE_CASE :Optional[int]=0.1 , SCREAMING_SNAKE_CASE :Any=0.1 , SCREAMING_SNAKE_CASE :str=5_1_2 , SCREAMING_SNAKE_CASE :Union[str, Any]=0.02 , SCREAMING_SNAKE_CASE :Optional[int]=0 , SCREAMING_SNAKE_CASE :List[Any]=None , ) -> List[Any]:
'''simple docstring'''
_a : List[str] =parent
_a : Dict =batch_size
_a : Optional[int] =seq_length
_a : Any =is_training
_a : Optional[Any] =use_input_mask
_a : List[str] =use_labels
_a : List[str] =vocab_size
_a : Any =hidden_size
_a : Optional[Any] =projection_dim
_a : Any =num_hidden_layers
_a : List[str] =num_attention_heads
_a : Any =intermediate_size
_a : List[Any] =dropout
_a : Any =attention_dropout
_a : Any =max_position_embeddings
_a : Optional[int] =initializer_range
_a : int =scope
_a : Dict =bos_token_id
def __UpperCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
_a : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : Union[str, Any] =None
if self.use_input_mask:
_a : int =random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
_a : Any =input_mask.numpy()
_a , _a : Any =input_mask.shape
_a : str =np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE ):
_a : str =1
_a : int =0
_a : int =self.get_config()
return config, input_ids, tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Any ) -> Optional[int]:
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Tuple ) -> Optional[int]:
'''simple docstring'''
_a : Optional[Any] =TFBlipTextModel(config=SCREAMING_SNAKE_CASE )
_a : Dict =model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
_a : Tuple =model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCAmelCase ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
_a : Dict =self.prepare_config_and_inputs()
_a , _a , _a : List[Any] =config_and_inputs
_a : List[str] ={"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class A__ ( UpperCAmelCase__ , unittest.TestCase ):
__UpperCamelCase : Tuple = (TFBlipTextModel,) if is_tf_available() else ()
__UpperCamelCase : Tuple = False
__UpperCamelCase : List[Any] = False
__UpperCamelCase : Union[str, Any] = False
def __UpperCAmelCase ( self :int ) -> Optional[Any]:
'''simple docstring'''
_a : Tuple =BlipTextModelTester(self )
_a : int =ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCAmelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
_a : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Dict ) -> Dict:
'''simple docstring'''
pass
def __UpperCAmelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason="""Blip does not use inputs_embeds""" )
def __UpperCAmelCase ( self :Optional[int] ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def __UpperCAmelCase ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def __UpperCAmelCase ( self :Tuple ) -> Any:
'''simple docstring'''
pass
@slow
def __UpperCAmelCase ( self :int ) -> Tuple:
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any =TFBlipTextModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Tuple=True ) -> str:
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=SCREAMING_SNAKE_CASE )
| 506
| 1
|
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
UpperCAmelCase = logging.get_logger(__name__)
enable_full_determinism()
class UpperCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ = UNetaDModel
snake_case__ = '''sample'''
@property
def _UpperCamelCase ( self : Dict ) -> List[str]:
_UpperCamelCase = 4
_UpperCamelCase = 3
_UpperCamelCase = (32, 32)
_UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
_UpperCamelCase = torch.tensor([10] ).to(__UpperCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def _UpperCamelCase ( self : List[Any] ) -> int:
return (3, 32, 32)
@property
def _UpperCamelCase ( self : List[Any] ) -> int:
return (3, 32, 32)
def _UpperCamelCase ( self : Tuple ) -> int:
_UpperCamelCase = {
'''block_out_channels''': (32, 64),
'''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
'''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
'''attention_head_dim''': 3,
'''out_channels''': 3,
'''in_channels''': 3,
'''layers_per_block''': 2,
'''sample_size''': 32,
}
_UpperCamelCase = self.dummy_input
return init_dict, inputs_dict
class UpperCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ = UNetaDModel
snake_case__ = '''sample'''
@property
def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
_UpperCamelCase = 4
_UpperCamelCase = 4
_UpperCamelCase = (32, 32)
_UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
_UpperCamelCase = torch.tensor([10] ).to(__UpperCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def _UpperCamelCase ( self : List[Any] ) -> str:
return (4, 32, 32)
@property
def _UpperCamelCase ( self : Tuple ) -> Dict:
return (4, 32, 32)
def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
_UpperCamelCase = {
'''sample_size''': 32,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (32, 64),
'''attention_head_dim''': 32,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
_UpperCamelCase = self.dummy_input
return init_dict, inputs_dict
def _UpperCamelCase ( self : List[Any] ) -> Dict:
_UpperCamelCase , _UpperCamelCase = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(__UpperCamelCase )
_UpperCamelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def _UpperCamelCase ( self : List[Any] ) -> Tuple:
_UpperCamelCase , _UpperCamelCase = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=__UpperCamelCase )
model.to(__UpperCamelCase )
_UpperCamelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def _UpperCamelCase ( self : Tuple ) -> List[Any]:
# by default, model loading will use accelerate because `low_cpu_mem_usage=True`
_UpperCamelCase , _UpperCamelCase = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=__UpperCamelCase )
model_accelerate.to(__UpperCamelCase )
model_accelerate.eval()
_UpperCamelCase = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
_UpperCamelCase = noise.to(__UpperCamelCase )
_UpperCamelCase = torch.tensor([10] * noise.shape[0] ).to(__UpperCamelCase )
_UpperCamelCase = model_accelerate(__UpperCamelCase , __UpperCamelCase )['''sample''']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
_UpperCamelCase , _UpperCamelCase = UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''' , output_loading_info=__UpperCamelCase , low_cpu_mem_usage=__UpperCamelCase )
model_normal_load.to(__UpperCamelCase )
model_normal_load.eval()
_UpperCamelCase = model_normal_load(__UpperCamelCase , __UpperCamelCase )['''sample''']
assert torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1E-3 )
def _UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
_UpperCamelCase = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(__UpperCamelCase )
_UpperCamelCase = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_UpperCamelCase = noise.to(__UpperCamelCase )
_UpperCamelCase = torch.tensor([10] * noise.shape[0] ).to(__UpperCamelCase )
with torch.no_grad():
_UpperCamelCase = model(__UpperCamelCase , __UpperCamelCase ).sample
_UpperCamelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_UpperCamelCase = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1E-3 ) )
class UpperCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ = UNetaDModel
snake_case__ = '''sample'''
@property
def _UpperCamelCase ( self : List[str] , __UpperCamelCase : Union[str, Any]=(32, 32) ) -> Union[str, Any]:
_UpperCamelCase = 4
_UpperCamelCase = 3
_UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
_UpperCamelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__UpperCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def _UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
return (3, 32, 32)
@property
def _UpperCamelCase ( self : Optional[Any] ) -> Dict:
return (3, 32, 32)
def _UpperCamelCase ( self : Tuple ) -> str:
_UpperCamelCase = {
'''block_out_channels''': [32, 64, 64, 64],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1E-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
_UpperCamelCase = self.dummy_input
return init_dict, inputs_dict
@slow
def _UpperCamelCase ( self : Union[str, Any] ) -> Dict:
_UpperCamelCase , _UpperCamelCase = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(__UpperCamelCase )
_UpperCamelCase = self.dummy_input
_UpperCamelCase = floats_tensor((4, 3) + (256, 256) ).to(__UpperCamelCase )
_UpperCamelCase = noise
_UpperCamelCase = model(**__UpperCamelCase )
assert image is not None, "Make sure output is not None"
@slow
def _UpperCamelCase ( self : List[str] ) -> int:
_UpperCamelCase = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(__UpperCamelCase )
_UpperCamelCase = 4
_UpperCamelCase = 3
_UpperCamelCase = (256, 256)
_UpperCamelCase = torch.ones((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
_UpperCamelCase = torch.tensor(batch_size * [1E-4] ).to(__UpperCamelCase )
with torch.no_grad():
_UpperCamelCase = model(__UpperCamelCase , __UpperCamelCase ).sample
_UpperCamelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_UpperCamelCase = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1E-2 ) )
def _UpperCamelCase ( self : Any ) -> Any:
_UpperCamelCase = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(__UpperCamelCase )
_UpperCamelCase = 4
_UpperCamelCase = 3
_UpperCamelCase = (32, 32)
_UpperCamelCase = torch.ones((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
_UpperCamelCase = torch.tensor(batch_size * [1E-4] ).to(__UpperCamelCase )
with torch.no_grad():
_UpperCamelCase = model(__UpperCamelCase , __UpperCamelCase ).sample
_UpperCamelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_UpperCamelCase = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1E-2 ) )
def _UpperCamelCase ( self : int ) -> Optional[Any]:
# not required for this model
pass
| 420
|
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137

def lowercase ( lat1 : float , lon1 : float , lat2 : float , lon2 : float ) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2(P) * cos^2(Q) / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2(P) * sin^2(Q) / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
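# Illustrative usage (the coordinates are assumptions, not taken from this file;
# the relative haversine_distance import means this must run as part of the
# package, not as a loose script):
#   lowercase(37.774856, -122.424227, 40.713019, -74.012647)
# returns Lambert's ellipsoidal distance in meters between the two points.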
| 420
| 1
|
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class UpperCamelCase__ ( __snake_case, __snake_case, __snake_case, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = StableUnCLIPPipeline
_lowerCAmelCase = TEXT_TO_IMAGE_PARAMS
_lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
_lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_lowerCAmelCase = False
def __snake_case ( self ):
A__ : List[Any] = 32
A__ : Union[str, Any] = embedder_hidden_size
# prior components
torch.manual_seed(0 )
A__ : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
A__ : Any = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=A_ , projection_dim=A_ , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
A__ : Optional[int] = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=A_ , num_layers=1 , )
torch.manual_seed(0 )
A__ : Optional[Any] = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=A_ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
A__ : Tuple = StableUnCLIPImageNormalizer(embedding_dim=A_ )
A__ : Optional[int] = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
A__ : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
A__ : Dict = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=A_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
A__ : Optional[Any] = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=A_ , layers_per_block=1 , upcast_attention=A_ , use_linear_projection=A_ , )
torch.manual_seed(0 )
A__ : Union[str, Any] = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='''v_prediction''' , set_alpha_to_one=A_ , steps_offset=1 , )
torch.manual_seed(0 )
A__ : Any = AutoencoderKL()
A__ : List[Any] = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=0 ):
if str(A_ ).startswith('''mps''' ):
A__ : List[str] = torch.manual_seed(A_ )
else:
A__ : str = torch.Generator(device=A_ ).manual_seed(A_ )
A__ : int = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __snake_case ( self ):
A__ : Any = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=A_ )
def __snake_case ( self ):
A__ : List[str] = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=A_ )
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self ):
A__ : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
A__ : Union[str, Any] = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A__ : Optional[int] = torch.Generator(device='''cpu''' ).manual_seed(0 )
A__ : Tuple = pipe('''anime turle''' , generator=A_ , output_type='''np''' )
A__ : str = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(A_ , A_ )
def __snake_case ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A__ : Union[str, Any] = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
A__ : List[Any] = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A__ : List[Any] = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
A__ : int = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 716
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Optional[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_SCREAMING_SNAKE_CASE : List[str] = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
_SCREAMING_SNAKE_CASE : Dict = {
'gpt-neox-20b': 2_0_4_8,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__=False , **UpperCamelCase__ , ):
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
A__ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase__ ) != add_prefix_space:
A__ : Union[str, Any] = getattr(UpperCamelCase__ , pre_tok_state.pop('''type''' ) )
A__ : List[Any] = add_prefix_space
A__ : Any = pre_tok_class(**UpperCamelCase__ )
A__ : List[Any] = add_prefix_space
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
A__ : Any = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
A__ : List[str] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
if len(UpperCamelCase__ ) > self.model_max_length:
A__ : Tuple = input_ids[-self.model_max_length :]
return input_ids
| 55
| 0
|
__A = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__A = [{'type': 'code', 'content': INSTALL_CONTENT}]
__A = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 484
|
def solution ( pence : int = 200 ) -> int:
    '''simple docstring'''
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , pence + 1 , 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]

if __name__ == "__main__":
    assert solution(200) == 73682
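    # Hand-checkable sanity case for the same coin-change DP (added example):
    # there are exactly four ways to make 5 pence from {1, 2, 5} --
    # 1+1+1+1+1, 1+1+1+2, 1+2+2, and 5.
    assert solution(5) == 4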
| 484
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
UpperCamelCase : Optional[Any] = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base" )
UpperCamelCase : int = {
"input_ids": tf.convert_to_tensor([[0, 26_46, 1_02_69, 83, 9_99_42, 2]] , dtype=tf.intaa ), # "My dog is cute"
"attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
UpperCamelCase : List[Any] = model(lowerCamelCase )["last_hidden_state"]
UpperCamelCase : str = tf.TensorShape((1, 6, 7_68) )
self.assertEqual(output.shape , lowerCamelCase )
# compare the actual values for a slice.
UpperCamelCase : int = tf.convert_to_tensor(
[
[
[0.0681762, 0.10894451, 0.06772504],
[-0.06423668, 0.02366615, 0.04329344],
[-0.06057295, 0.09974135, -0.00070584],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 435
|
'''simple docstring'''
from __future__ import annotations
import math
def is_prime ( number : int ) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number) + 1) , 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
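# Quick illustrative checks of the 6k +/- 1 trial division above:
#   is_prime(29) -> True  (29 = 6*5 - 1; 29 % 5 != 0 and 29 % 7 != 0)
#   is_prime(25) -> False (25 % 5 == 0 on the first trial divisor)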
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]

def compute_nums ( n : int ) -> list[int]:
    '''simple docstring'''
    if not isinstance(n , int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []

def solution ( ) -> int:
    '''simple docstring'''
    return compute_nums(1)[0]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 435
| 1
|
'''simple docstring'''
def _lowerCAmelCase ( input_str : str ) -> bool:
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2 , ch_unicode)
        # If the bit for this character's code point was already turned on
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
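# Usage examples for the bitmap uniqueness check above (function name is the
# anonymized one kept from this copy):
#   _lowerCAmelCase("abcde")  -> True  (all characters distinct)
#   _lowerCAmelCase("abcdea") -> False (second 'a' finds its bit already set)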
| 8
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def lowerCamelCase ( UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : Dict=None , UpperCamelCase : str=None ) -> List[str]:
if attention_mask is None:
_lowerCamelCase = tf.cast(tf.math.not_equal(UpperCamelCase , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class lowerCAmelCase__ :
'''simple docstring'''
lowerCAmelCase_ = OPTConfig
lowerCAmelCase_ = {}
lowerCAmelCase_ = 'gelu'
def __init__( self : Dict , snake_case__ : List[Any] , snake_case__ : Any=1_3 , snake_case__ : List[str]=7 , snake_case__ : Optional[Any]=True , snake_case__ : int=False , snake_case__ : Any=9_9 , snake_case__ : Optional[int]=1_6 , snake_case__ : str=2 , snake_case__ : List[str]=4 , snake_case__ : Union[str, Any]=4 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : Optional[Any]=0.1 , snake_case__ : Union[str, Any]=0.1 , snake_case__ : Any=2_0 , snake_case__ : Any=2 , snake_case__ : Optional[int]=1 , snake_case__ : Optional[Any]=0 , snake_case__ : int=1_6 , snake_case__ : List[Any]=1_6 , ) -> List[Any]:
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = eos_token_id
_lowerCamelCase = pad_token_id
_lowerCamelCase = bos_token_id
_lowerCamelCase = embed_dim
_lowerCamelCase = word_embed_proj_dim
_lowerCamelCase = False
def _snake_case ( self : List[Any] ) -> Optional[int]:
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_lowerCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_lowerCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
_lowerCamelCase = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=snake_case__ , **self.config_updates , )
_lowerCamelCase = prepare_opt_inputs_dict(snake_case__ , snake_case__ )
return config, inputs_dict
def _snake_case ( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : List[Any] ) -> Optional[int]:
_lowerCamelCase = TFOPTModel(config=snake_case__ )
_lowerCamelCase = inputs_dict['input_ids']
_lowerCamelCase = input_ids[:1, :]
_lowerCamelCase = inputs_dict['attention_mask'][:1, :]
_lowerCamelCase = 1
# first forward pass
_lowerCamelCase = model(snake_case__ , attention_mask=snake_case__ , use_cache=snake_case__ )
_lowerCamelCase , _lowerCamelCase = outputs.to_tuple()
# create hypothetical next token and extend to next_input_ids
_lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention_mask
_lowerCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
_lowerCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_lowerCamelCase = model(snake_case__ , attention_mask=snake_case__ )[0]
_lowerCamelCase = model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_lowerCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx]
_lowerCamelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1e-3 )
@require_tf
class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
lowerCAmelCase_ = (TFOPTForCausalLM,) if is_tf_available() else ()
lowerCAmelCase_ = (
{'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = 10
def _snake_case ( self : List[Any] ) -> int:
_lowerCamelCase = TFOPTModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=snake_case__ )
def _snake_case ( self : List[str] ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _snake_case ( self : Dict ) -> Union[str, Any]:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ )
def _snake_case ( self : Optional[int] ) -> Optional[Any]:
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(snake_case__ : List[Any] , snake_case__ : Dict ):
if hasattr(snake_case__ , 'weight' ):
return embedding_layer.weight
else:
# Here we build the word embedding weights if they don't exist,
# then retry fetching the attribute once built.
model.build()
if hasattr(snake_case__ , 'weight' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
_lowerCamelCase = model_class(config=snake_case__ )
_lowerCamelCase = _get_word_embedding_weight(snake_case__ , model.get_input_embeddings() )
_lowerCamelCase = _get_word_embedding_weight(snake_case__ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(snake_case__ )
_lowerCamelCase = _get_word_embedding_weight(snake_case__ , model.get_input_embeddings() )
_lowerCamelCase = _get_word_embedding_weight(snake_case__ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
_lowerCamelCase = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , snake_case__ )
# check that weights remain the same after resizing
_lowerCamelCase = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_lowerCamelCase = False
self.assertTrue(snake_case__ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , snake_case__ )
_lowerCamelCase = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_lowerCamelCase = False
self.assertTrue(snake_case__ )
def lowerCamelCase ( UpperCamelCase : str ) -> List[str]:
return tf.constant(UpperCamelCase , dtype=tf.intaa )
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = 99
def _snake_case ( self : Any ) -> int:
_lowerCamelCase = tf.ones((4, 1) , dtype=tf.intaa ) * 2
_lowerCamelCase = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
_lowerCamelCase = input_ids.shape[0]
_lowerCamelCase = OPTConfig(
vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _snake_case ( self : List[Any] ) -> str:
_lowerCamelCase = TFOPTModel.from_pretrained('facebook/opt-350m' )
_lowerCamelCase = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase = tf.not_equal(snake_case__ , model.config.pad_token_id )
with tf.GradientTape():
_lowerCamelCase = model(input_ids=snake_case__ , attention_mask=snake_case__ ).last_hidden_state
_lowerCamelCase = (1, 1_1, 5_1_2)
self.assertEqual(output.shape , snake_case__ )
_lowerCamelCase = tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , snake_case__ , atol=4e-3 ) )
_lowerCamelCase = tf.function(snake_case__ , jit_compile=snake_case__ )
_lowerCamelCase = xla_generate(snake_case__ , snake_case__ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , snake_case__ , atol=4e-2 ) )
@require_tf
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Tuple ) -> List[str]:
super().setUp()
_lowerCamelCase = 'facebook/opt-350m'
def _snake_case ( self : Union[str, Any] ) -> Dict:
_lowerCamelCase = TFOPTForCausalLM.from_pretrained(self.path_model )
_lowerCamelCase = GPTaTokenizer.from_pretrained(self.path_model )
_lowerCamelCase = [
'Today is a beautiful day and I want to',
'In the city of',
'Paris is the capital of France and',
'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
_lowerCamelCase = tokenizer(snake_case__ , return_tensors='tf' , padding=snake_case__ , add_special_tokens=snake_case__ )
_lowerCamelCase = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
_lowerCamelCase = tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
] )
self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1e-4 ) )
_lowerCamelCase = tf.function(snake_case__ , jit_compile=snake_case__ )
_lowerCamelCase = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1e-4 ) )
@require_tf
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def _snake_case ( self : str ) -> List[str]:
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def _snake_case ( self : Dict ) -> Optional[int]:
_lowerCamelCase = 'facebook/opt-125m'
_lowerCamelCase = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
_lowerCamelCase = []
_lowerCamelCase = GPTaTokenizer.from_pretrained(snake_case__ )
_lowerCamelCase = TFOPTForCausalLM.from_pretrained(snake_case__ )
for prompt in self.prompts:
_lowerCamelCase = tokenizer(snake_case__ , return_tensors='tf' ).input_ids
_lowerCamelCase = model.generate(snake_case__ , max_length=1_0 )
_lowerCamelCase = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
predicted_outputs += generated_string
self.assertListEqual(snake_case__ , snake_case__ )
def _snake_case ( self : int ) -> Dict:
_lowerCamelCase = 'facebook/opt-350m'
_lowerCamelCase = GPTaTokenizer.from_pretrained(snake_case__ )
_lowerCamelCase = TFOPTForCausalLM.from_pretrained(snake_case__ )
_lowerCamelCase = 'left'
# use different length sentences to test batching
_lowerCamelCase = [
'Hello, my dog is a little',
'Today, I',
]
_lowerCamelCase = tokenizer(snake_case__ , return_tensors='tf' , padding=snake_case__ )
_lowerCamelCase = inputs['input_ids']
_lowerCamelCase = model.generate(input_ids=snake_case__ , attention_mask=inputs['attention_mask'] )
_lowerCamelCase = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
_lowerCamelCase = model.generate(input_ids=snake_case__ )
_lowerCamelCase = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
_lowerCamelCase = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
_lowerCamelCase = model.generate(input_ids=snake_case__ , max_length=model.config.max_length - num_paddings )
_lowerCamelCase = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
_lowerCamelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case__ )
_lowerCamelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case__ )
_lowerCamelCase = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(snake_case__ , snake_case__ )
self.assertListEqual(snake_case__ , [non_padded_sentence, padded_sentence] )
def _snake_case ( self : List[Any] ) -> Optional[Any]:
_lowerCamelCase = 'facebook/opt-350m'
_lowerCamelCase = [
'Today is a beautiful day and I want to',
'In the city of San Francisco, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
_lowerCamelCase = []
_lowerCamelCase = GPTaTokenizer.from_pretrained(snake_case__ )
_lowerCamelCase = TFOPTForCausalLM.from_pretrained(snake_case__ )
for prompt in self.prompts:
_lowerCamelCase = tokenizer(snake_case__ , return_tensors='tf' ).input_ids
_lowerCamelCase = model.generate(snake_case__ , max_length=1_0 )
_lowerCamelCase = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
predicted_outputs += generated_string
self.assertListEqual(snake_case__ , snake_case__ )
| 544
| 0
|
def binomial_coefficient ( n , r ) -> int:
    """simple docstring"""
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]

print(binomial_coefficient(n=10, r=5))
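# Sanity check for the row-wise Pascal's-triangle update above (added example):
# C(10, 5) = 252, so the print call outputs 252; C(5, 2) would give 10.
assert binomial_coefficient(n=10, r=5) == 252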
| 294
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class lowerCamelCase ( A_ ):
UpperCAmelCase__ : Optional[int] = "roberta"
def __init__(self : Union[str, Any] , _A : List[Any]=5_0_2_6_5 , _A : Dict=7_6_8 , _A : Tuple=1_2 , _A : Optional[Any]=1_2 , _A : int=3_0_7_2 , _A : List[str]="gelu" , _A : Tuple=0.1 , _A : Dict=0.1 , _A : Optional[int]=5_1_2 , _A : Dict=2 , _A : Optional[Any]=0.02 , _A : Optional[Any]=1E-12 , _A : str=1 , _A : Dict=0 , _A : Optional[int]=2 , _A : int="absolute" , _A : Any=True , _A : Union[str, Any]=None , **_A : Optional[int] , ) -> Tuple:
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = hidden_act
snake_case = intermediate_size
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = initializer_range
snake_case = layer_norm_eps
snake_case = position_embedding_type
snake_case = use_cache
snake_case = classifier_dropout
class lowerCamelCase ( A_ ):
@property
def UpperCAmelCase(self : int ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case = {0: "batch", 1: "choice", 2: "sequence"}
else:
snake_case = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 294
| 1
|
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class __UpperCamelCase ( UpperCamelCase ):
def __init__( self : Tuple , UpperCAmelCase : Optional[NestedDataStructureLike[PathLike]] = None , UpperCAmelCase : Optional[NamedSplit] = None , UpperCAmelCase : Optional[Features] = None , UpperCAmelCase : str = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : List[Any] , ) -> List[Any]:
lowerCAmelCase :List[Any] = path_or_paths
lowerCAmelCase :Union[str, Any] = split if split or isinstance(UpperCAmelCase , UpperCAmelCase ) else 'train'
lowerCAmelCase :Any = features
lowerCAmelCase :Dict = cache_dir
lowerCAmelCase :int = keep_in_memory
lowerCAmelCase :Optional[int] = streaming
lowerCAmelCase :Tuple = num_proc
lowerCAmelCase :Tuple = kwargs
@abstractmethod
def UpperCAmelCase__ ( self : Optional[Any] ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
pass
class __UpperCamelCase ( UpperCamelCase ):
def __init__( self : Dict , UpperCAmelCase : Optional[Features] = None , UpperCAmelCase : str = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : List[str] , ) -> Union[str, Any]:
lowerCAmelCase :int = features
lowerCAmelCase :Tuple = cache_dir
lowerCAmelCase :Tuple = keep_in_memory
lowerCAmelCase :Optional[Any] = streaming
lowerCAmelCase :Optional[int] = num_proc
lowerCAmelCase :Tuple = kwargs
@abstractmethod
def UpperCAmelCase__ ( self : Optional[Any] ) -> Union[Dataset, IterableDataset]:
pass
| 553
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__SCREAMING_SNAKE_CASE = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 553
| 1
|
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
__lowercase = True
except (ImportError, AttributeError):
__lowercase = object
def lowerCamelCase ( *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
'''simple docstring'''
pass
__lowercase = False
__lowercase = logging.get_logger('''transformers-cli/serving''')
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Tuple = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(SCREAMING_SNAKE_CASE , args.host , args.port , args.workers )
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : dict
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : List[str]
a__ : Optional[List[int]]
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : str
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Any
class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on"
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/", self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize", self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=["POST"],
                    ),
                    APIRoute(
                        "/forward", self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
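
# Hedged client-side sketch (illustrative, not part of the command itself): once the
# server is started, e.g. `transformers-cli serve --task feature-extraction --port 8888`,
# the routes above can be exercised with `requests`; the payload keys mirror the
# Body(..., embed=True) parameter names in tokenize/detokenize/forward.
#
#   import requests
#   requests.get("http://localhost:8888/").json()                          # model_info
#   requests.post(
#       "http://localhost:8888/tokenize",
#       json={"text_input": "Hello world", "return_ids": True},
#   ).json()                                                               # tokenize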
| 452
|
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__( self, parent, batch_size=2, seq_length=8, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=16, num_hidden_layers=5, num_attention_heads=2, intermediate_size=36, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
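
if __name__ == "__main__":
    # Hedged convenience hook (an assumption, not part of the upstream test layout):
    # lets this module run standalone with `python <this_file>.py` instead of pytest.
    unittest.main()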
| 452
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
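
# Hedged usage sketch (illustrative): with the lazy module installed above, submodule
# attributes resolve on first access instead of at package import time, e.g.:
#
#   from transformers.models.roc_bert import RoCBertConfig   # config: plain dict entry
#   from transformers.models.roc_bert import RoCBertModel    # lazily imports modeling_roc_bert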
| 35
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    """
    Object detection pipeline using any `AutoModelForObjectDetection`. This pipeline predicts bounding boxes of
    objects and their classes.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )
    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs
    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]
        return annotation
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
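
if __name__ == "__main__":
    # Hedged usage sketch: the task factory wires this class up under "object-detection".
    # Model name and threshold are illustrative; requires torch, vision deps and network.
    from transformers import pipeline

    detector = pipeline("object-detection", model="facebook/detr-resnet-50")
    for prediction in detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9):
        print(prediction["label"], round(prediction["score"], 3), prediction["box"])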
| 86
| 0
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase = logging.get_logger(__name__)
# TODO: upload to AWS
UpperCAmelCase = {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"""
),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"
    def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
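
if __name__ == "__main__":
    # Hedged usage sketch: defaults mirror the signature above; overriding one field
    # shows the usual PretrainedConfig flow. Printed values follow those defaults.
    config = RetriBertConfig(projection_dim=256)
    print(config.model_type, config.hidden_size, config.projection_dim)  # retribert 768 256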
| 342
|
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(image_processor, CLIPImageProcessor)
    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            image_processor = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            image_processor = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")
    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CustomImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
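
if __name__ == "__main__":
    # Hedged sketch of the register API exercised above: map a custom config class to a
    # custom processor so the auto class can resolve it; the test doubles come from the
    # imports at the top of this file.
    AutoConfig.register("custom", CustomConfig)
    AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
    print(IMAGE_PROCESSOR_MAPPING[CustomConfig])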
| 342
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
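
if __name__ == "__main__":
    # Hedged sketch: the smallest end-to-end DDIM run, reusing the tiny dummy UNet
    # configuration from get_dummy_components above so it stays CPU-friendly.
    unet = UNet2DModel(
        block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3,
        down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
    image = pipe(batch_size=1, num_inference_steps=2, output_type="numpy").images
    print(image.shape)  # (1, 32, 32, 3)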
| 608
|
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
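    # Hedged extra check: a blinker has period 2, so two generations reproduce it.
    assert new_generation(new_generation(BLINKER)) == BLINKER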
| 585
| 0
|
"""simple docstring"""
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """
    image: is a grayscale PIL image object
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
image.save("""output_image_path""")
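    # Hedged sanity check: thresholding should leave only pure black/white pixels.
    assert set(image.getdata()) <= {0, 255}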
| 715
|
"""simple docstring"""
from copy import deepcopy
class BinaryIndexedTree:
    def __init__(self, arr=None, size=None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
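    # Hedged usage sketch: point update plus prefix/range queries on a small array.
    bit = BinaryIndexedTree([1, 2, 3, 4])
    bit.add(1, 10)          # underlying array becomes [1, 12, 3, 4]
    print(bit.prefix(3))    # sum of the first three values -> 16
    print(bit.query(1, 4))  # sum over indices [1, 4) -> 19
    print(bit.get_array())  # -> [1, 12, 3, 4]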
| 20
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__( self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )
    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip(reason="RegNet does not use inputs_embeds")
def a_ ( self : Union[str, Any]):
"""simple docstring"""
pass
@unittest.skip(reason="RegNet does not support input and output embeddings")
def a_ ( self : Optional[int]):
"""simple docstring"""
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
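
if __name__ == "__main__":
    # Hedged sketch of the JIT-vs-eager comparison used in test_jit_compilation above,
    # shown on a standalone function; assumes jax/flax are installed (see guard at top).
    import numpy as np

    @jax.jit
    def scaled_sum(x):
        return jnp.sum(x) * 2.0

    x = jnp.ones((2, 3))
    with jax.disable_jit():
        eager_out = scaled_sum(x)
    jitted_out = scaled_sum(x)
    assert np.allclose(eager_out, jitted_out)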
| 77
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__a = logging.get_logger(__name__)
__a = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self, vocab_size=512 + 1, n_positions=32 * 32, n_embd=512, n_layer=24, n_head=8, n_inner=None, activation_function="quick_gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, tie_word_embeddings=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs( self, preprocessor: "FeatureExtractionMixin", batch_size: int = 1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 32, image_height: int = 32, ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
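
if __name__ == "__main__":
    # Hedged sketch: the attribute_map above aliases common config names onto the
    # GPT-style ones, so hidden_size/num_hidden_layers resolve to n_embd/n_layer.
    config = ImageGPTConfig()
    print(config.vocab_size, config.hidden_size, config.num_hidden_layers)  # 513 512 24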
| 30
| 0
|
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
_lowerCamelCase = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
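
# Worked example (illustrative): with the default scale_factor=8 the divisor is
# 8**2 == 64, so downscale_height_and_width(512, 512) -> (64, 64) (512 // 64 == 8,
# exact), while downscale_height_and_width(520, 520) -> (72, 72) (520 // 64 == 8
# remainder 8, so each side rounds up to 9 before multiplying back by 8).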
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__( self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, ):
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(_lowerCamelCase)
    def __call__(self, image_embeds, negative_image_embeds, hint, height=512, width=512, num_inference_steps=100, guidance_scale=4.0, num_images_per_prompt=1, generator=None, latents=None, output_type="pil", return_dict=True):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 721
|
"""simple docstring"""
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list) -> list:
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
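# A quick illustrative example (not from the original module), assuming the two
# functions above: `merge` stitches two already-sorted halves together in place,
# and `iter_merge_sort` drives it bottom-up with doubling block sizes.
#
#   >>> merge([3, 4, 1, 2], low=0, mid=2, high=3)   # halves [3, 4] and [1, 2]
#   [1, 2, 3, 4]
#   >>> iter_merge_sort([5, 9, 8, 7, 1, 2, 7])
#   [1, 2, 5, 7, 7, 8, 9]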
if __name__ == "__main__":
_lowerCamelCase = input('Enter numbers separated by a comma:\n').strip()
if user_input == "":
_lowerCamelCase = []
else:
_lowerCamelCase = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted))
| 112
| 0
|
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    """simple docstring"""

    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    """simple docstring"""

    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    """simple docstring"""

    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
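# Minimal usage sketch (illustrative, not in the original file): the dataset
# produces noisy samples of y = a*x + b and the model computes the same affine
# map, so a fresh model with a=2, b=3 transforms inputs exactly:
#
#   >>> model = RegressionModel(a=2, b=3)
#   >>> model(torch.tensor([1.0, 2.0]))   # prints a dtype banner on first call
#   tensor([5., 7.], grad_fn=<AddBackward0>)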
def mocked_dataloaders(accelerator, batch_size: int = 16):
    """simple docstring"""
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"]
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
| 61
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """simple docstring"""
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )
    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_inference_classification_head(self):
        """simple docstring"""
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 637
| 0
|
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 64
|
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    col: int
    pivot_row: int
    row2: int
    col2: int
    ratio: float
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_points: list[int]) -> Callable[[int], int]:
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
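# Worked example (illustrative, not part of the original solution): fitting a
# quadratic through the first three cubes reproduces Project Euler 101's first
# incorrect term for u(n) = n**3, since 6*16 - 11*4 + 6 = 58.
#
#   >>> interpolate([1, 8, 27])(4)   # OP(3, 4) for the cube sequence
#   58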
| 64
| 1
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    '''simple docstring'''

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)
    def test_framework_provided(self):
        mock_framework = "mock_framework"
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)
    def test_from_environment(self):
        # Framework not provided, TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
| 167
|
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    '''simple docstring'''
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class MakeDuplicateClustersTest(TestCase):
    '''simple docstring'''

    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 167
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
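# Illustrative note (not in the original file): with the `_LazyModule` swap above,
# importing the package is cheap; the tokenizer submodule is only executed when an
# attribute is first touched, e.g.
#
#   from transformers.models.nllb import NllbTokenizer   # triggers the real import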
| 365
|
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            """simple docstring"""
            pass

    def load_image(_):
        """simple docstring"""
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        """simple docstring"""
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        """simple docstring"""
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        """simple docstring"""
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"
        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])
        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        """simple docstring"""
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        """simple docstring"""
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        """simple docstring"""
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        """simple docstring"""
        pass
| 365
| 1
|
'''simple docstring'''
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("""PROCESS_TRAIN""", """false""")
CATEGORY_MAPPING = {"""null""": 0, """short""": 1, """long""": 2, """yes""": 3, """no""": 4}
def _get_single_answer(example):
    '''simple docstring'''
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)
    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False
    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])
    return answer
def get_context_and_ans(example, assertion=False):
    """Gives new context after removing html tags."""
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
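
# Worked example of the html index shift above (illustrative values): with document
# tokens ['<p>', 'hello', 'world', '</p>'], is_html [1, 0, 0, 1], start_token 1 and
# (exclusive) end_token 3, the html tag at index 0 shifts both offsets left by one,
# so the cleaned context ['hello', 'world'] is sliced as context[0:2] -- exactly the
# span the loop recovers.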
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    # overlap will be of doc_stride - q_len
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(inputs),
                "end_token": [-100] * len(inputs),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["start_token"]]), add_special_tokens=False).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )
    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
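
# Striding arithmetic used above, with the module defaults: document windows start every
# `max_length - doc_stride` = 4096 - 2048 = 2048 ids, each window holds
# `max_length - q_len` document ids after the question prefix, so consecutive windows
# overlap by `doc_stride - q_len` ids (e.g. 2032 ids when q_len = 16), matching the
# comment at the top of the function.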
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example


def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60 % of the no-answer ("null") samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )


if __name__ == "__main__":
    from datasets import load_dataset

    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
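
# Usage sketch (assumption: this file is executed directly as a script): set
# PROCESS_TRAIN=true in the environment to preprocess the train split instead of the
# validation split; output is appended to nq-training.jsonl / nq-validation.jsonl.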
| 435
|
'''F1 metric, wrapping `sklearn.metrics.f1_score`.'''
from sklearn.metrics import f1_score

import datasets
lowerCAmelCase_ : int = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
lowerCAmelCase_ : Optional[int] = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
lowerCAmelCase_ : Any = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 435
| 1
|
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the merged, sorted contents of two arrays.

    >>> median_of_two_arrays([1.0, 2.0], [3.0])
    2.0
    >>> median_of_two_arrays([1.0, 2.0], [3.0, 4.0])
    2.5
    """
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 706
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
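
# Note on the pattern above: `_LazyModule` replaces this module in `sys.modules`, so
# names listed in `_import_structure` (e.g. `TapasModel`) are only imported from their
# submodules on first attribute access; the `TYPE_CHECKING` branch gives type checkers
# eager imports without paying that cost at runtime.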
| 441
| 0
|
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
| 99
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mluke'] = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 99
| 1
|
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory function used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser('train', help='CLI tool to train a model on a task.')
        train_parser.add_argument(
            '--train_data',
            type=str,
            required=True,
            help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.',
        )
        train_parser.add_argument(
            '--column_label', type=int, default=0, help='Column of the dataset csv file with example labels.'
        )
        train_parser.add_argument(
            '--column_text', type=int, default=1, help='Column of the dataset csv file with example texts.'
        )
        train_parser.add_argument(
            '--column_id', type=int, default=2, help='Column of the dataset csv file with example ids.'
        )
        train_parser.add_argument(
            '--skip_first_row', action='store_true', help='Skip the first row of the csv file (headers).'
        )
        train_parser.add_argument('--validation_data', type=str, default='', help='path to validation dataset.')
        train_parser.add_argument(
            '--validation_split',
            type=float,
            default=0.1,
            help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.',
        )
        train_parser.add_argument('--output', type=str, default='./', help='path to saved the trained model.')
        train_parser.add_argument(
            '--task', type=str, default='text_classification', help='Task to train the model on.'
        )
        train_parser.add_argument(
            '--model', type=str, default='bert-base-uncased', help='Model\'s name or path to stored model.'
        )
        train_parser.add_argument('--train_batch_size', type=int, default=32, help='Batch size for training.')
        train_parser.add_argument('--valid_batch_size', type=int, default=64, help='Batch size for validation.')
        train_parser.add_argument('--learning_rate', type=float, default=3e-5, help='Learning rate.')
        train_parser.add_argument('--adam_epsilon', type=float, default=1e-08, help='Epsilon for Adam optimizer.')
        train_parser.set_defaults(func=train_command_factory)
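
    # Example invocation (illustrative; the csv path is an assumption):
    #   transformers-cli train --train_data ./train.csv --model bert-base-uncased --output ./trained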
    def __init__(self, args):
        self.logger = logging.get_logger('transformers-cli/training')
        self.framework = 'tf' if is_tf_available() else 'torch'
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f'Loading {args.task} pipeline for {args.model}')
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f'Loading dataset from {args.train_data}')
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f'Loading validation dataset from {args.validation_data}')
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
| 58
|
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA')
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(),
        reason='XFormers attention is only available with CUDA and `xformers` installed',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0', variant='fp16', torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            'DeepFloyd/IF-II-L-v1.0', variant='fp16', torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to('cuda')
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt('anime turtle', device='cuda')

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
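
    # The three helpers below share one shape: run the stage-I pipeline at 64x64, check
    # peak VRAM and compare against a reference image, then run the stage-II
    # super-resolution pipeline at 256x256 and repeat the checks.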
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type='np',
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy'
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device='cpu').manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            image=image,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            generator=generator,
            num_inference_steps=2,
            output_type='np',
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy'
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type='np',
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy'
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device='cpu').manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type='np',
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy'
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type='np',
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy'
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device='cpu').manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type='np',
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy'
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
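
# A minimal end-user sketch of the two-stage cascade exercised in `test_all` above.
# Kept as comments because it needs a CUDA GPU; prompt handling and call signatures are
# illustrative assumptions, not taken from this test file:
#
#   pipe_1 = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0', variant='fp16', torch_dtype=torch.float16)
#   pipe_1.enable_model_cpu_offload()
#   image = pipe_1('anime turtle', output_type='pt').images
#   pipe_2 = IFSuperResolutionPipeline.from_pretrained(
#       'DeepFloyd/IF-II-L-v1.0', variant='fp16', torch_dtype=torch.float16
#   )
#   pipe_2.enable_model_cpu_offload()
#   image = pipe_2(image=image, prompt='anime turtle', output_type='np').images[0]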
| 58
| 1
|