| code (string, lengths 81 to 54k) | code_codestyle (int64, 0 to 721) | style_context (string, lengths 91 to 41.9k) | style_context_codestyle (int64, 0 to 699) | label (int64, 0 to 1) |
|---|---|---|---|---|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : str = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 636 |
class snake_case ( UpperCamelCase_ ):
pass
class snake_case ( UpperCamelCase_ ):
pass
class snake_case :
def __init__( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = [
[],
[],
[],
]
def __lowercase( self : int , a_ : int , a_ : int )-> None:
"""simple docstring"""
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError('Maximum queue size is 100' )
self.queues[priority].append(a_ )
except IndexError:
raise ValueError('Valid priorities are 0, 1, and 2' )
def __lowercase( self : int )-> int:
"""simple docstring"""
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self : Any )-> str:
"""simple docstring"""
return "\n".join(F'''Priority {i}: {q}''' for i, q in enumerate(self.queues ) )
class snake_case :
def __init__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = []
def __lowercase( self : List[str] , a_ : int )-> None:
"""simple docstring"""
if len(self.queue ) == 100:
raise OverFlowError('Maximum queue size is 100' )
self.queue.append(a_ )
def __lowercase( self : int )-> int:
"""simple docstring"""
if not self.queue:
raise UnderFlowError('The queue is empty' )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = min(self.queue )
self.queue.remove(a_ )
return data
def __str__( self : List[str] )-> str:
"""simple docstring"""
return str(self.queue )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
print(lowercase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(lowercase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
print(lowercase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(lowercase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 636 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
class snake_case ( UpperCamelCase_ , UpperCamelCase_ ):
lowercase_ = 'maskformer-swin'
lowercase_ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Optional[Any] , a_ : List[str]=224 , a_ : Tuple=4 , a_ : List[Any]=3 , a_ : Optional[int]=96 , a_ : Any=[2, 2, 6, 2] , a_ : int=[3, 6, 12, 24] , a_ : Any=7 , a_ : Any=4.0 , a_ : Optional[int]=True , a_ : Optional[Any]=0.0 , a_ : Optional[Any]=0.0 , a_ : Optional[int]=0.1 , a_ : Union[str, Any]="gelu" , a_ : Optional[Any]=False , a_ : Any=0.02 , a_ : Optional[int]=1e-5 , a_ : List[Any]=None , a_ : Dict=None , **a_ : Any , )-> Any:
"""simple docstring"""
super().__init__(**a_ )
SCREAMING_SNAKE_CASE__ : str = image_size
SCREAMING_SNAKE_CASE__ : List[Any] = patch_size
SCREAMING_SNAKE_CASE__ : str = num_channels
SCREAMING_SNAKE_CASE__ : Dict = embed_dim
SCREAMING_SNAKE_CASE__ : List[str] = depths
SCREAMING_SNAKE_CASE__ : int = len(a_ )
SCREAMING_SNAKE_CASE__ : Dict = num_heads
SCREAMING_SNAKE_CASE__ : Optional[Any] = window_size
SCREAMING_SNAKE_CASE__ : Tuple = mlp_ratio
SCREAMING_SNAKE_CASE__ : List[Any] = qkv_bias
SCREAMING_SNAKE_CASE__ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] = drop_path_rate
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_absolute_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = layer_norm_eps
SCREAMING_SNAKE_CASE__ : int = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE__ : Dict = int(embed_dim * 2 ** (len(a_ ) - 1) )
SCREAMING_SNAKE_CASE__ : Tuple = ['stem'] + [F'''stage{idx}''' for idx in range(1 , len(a_ ) + 1 )]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = get_aligned_output_features_output_indices(
out_features=a_ , out_indices=a_ , stage_names=self.stage_names )
| 636 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _a ( lowercase__ : List[str] ):
'''simple docstring'''
if not is_accelerate_available():
return method
SCREAMING_SNAKE_CASE__ : str = version.parse(accelerate.__version__ ).base_version
if version.parse(lowercase__ ) < version.parse('0.17.0' ):
return method
def wrapper(self : Optional[int] , *lowercase__ : int , **lowercase__ : Tuple ):
if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
self._hf_hook.pre_forward(self )
return method(self , *lowercase__ , **lowercase__ )
return wrapper
| 636 | 1 |
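For reference, a self-contained sketch of the pattern the guarded wrapper in the sample above implements: run an attached accelerate hook's `pre_forward` before the wrapped method, and fall back to the plain method otherwise. All names below are illustrative and independent of the sample's renamed identifiers.

```python
import functools

def run_pre_forward_hook(method):
    # Illustrative stand-in for the wrapper above: if the instance carries an
    # accelerate hook (set by accelerate's dispatch utilities), trigger its
    # pre_forward step before delegating to the original method.
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)
    return wrapper
```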
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class snake_case ( UpperCamelCase_ ):
lowercase_ = 'mvp'
lowercase_ = ['past_key_values']
lowercase_ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : List[Any] , a_ : Dict=5_0267 , a_ : Tuple=1024 , a_ : Dict=12 , a_ : str=4096 , a_ : List[Any]=16 , a_ : Optional[Any]=12 , a_ : Optional[int]=4096 , a_ : str=16 , a_ : int=0.0 , a_ : Dict=0.0 , a_ : List[Any]="gelu" , a_ : List[str]=1024 , a_ : Any=0.1 , a_ : List[Any]=0.0 , a_ : Dict=0.0 , a_ : Union[str, Any]=0.02 , a_ : Union[str, Any]=0.0 , a_ : Optional[Any]=False , a_ : Tuple=True , a_ : Optional[Any]=1 , a_ : Dict=0 , a_ : str=2 , a_ : Dict=True , a_ : str=2 , a_ : Optional[int]=2 , a_ : Any=False , a_ : Optional[Any]=100 , a_ : int=800 , **a_ : Union[str, Any] , )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = vocab_size
SCREAMING_SNAKE_CASE__ : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Optional[Any] = d_model
SCREAMING_SNAKE_CASE__ : List[str] = encoder_ffn_dim
SCREAMING_SNAKE_CASE__ : int = encoder_layers
SCREAMING_SNAKE_CASE__ : Dict = encoder_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : Union[str, Any] = decoder_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple = dropout
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_dropout
SCREAMING_SNAKE_CASE__ : List[str] = activation_dropout
SCREAMING_SNAKE_CASE__ : Optional[Any] = activation_function
SCREAMING_SNAKE_CASE__ : List[str] = init_std
SCREAMING_SNAKE_CASE__ : Optional[int] = encoder_layerdrop
SCREAMING_SNAKE_CASE__ : Optional[Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE__ : Optional[Any] = classifier_dropout
SCREAMING_SNAKE_CASE__ : List[Any] = use_cache
SCREAMING_SNAKE_CASE__ : List[Any] = encoder_layers
SCREAMING_SNAKE_CASE__ : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_prompt
SCREAMING_SNAKE_CASE__ : Any = prompt_length
SCREAMING_SNAKE_CASE__ : int = prompt_mid_dim
super().__init__(
pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , is_encoder_decoder=a_ , decoder_start_token_id=a_ , forced_eos_token_id=a_ , **a_ , )
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , a_ ):
SCREAMING_SNAKE_CASE__ : Any = self.bos_token_id
warnings.warn(
F'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'The config can simply be saved and uploaded again to be fixed.' )
| 636 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _a ( lowercase__ : int ):
'''simple docstring'''
if is_torch_version('<' , '2.0.0' ) or not hasattr(lowercase__ , '_dynamo' ):
return False
return isinstance(lowercase__ , torch._dynamo.eval_frame.OptimizedModule )
def _a ( lowercase__ : Optional[Any] , lowercase__ : bool = True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
SCREAMING_SNAKE_CASE__ : Dict = is_compiled_module(lowercase__ )
if is_compiled:
SCREAMING_SNAKE_CASE__ : Tuple = model
SCREAMING_SNAKE_CASE__ : int = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE__ : Any = model.module
if not keep_fpaa_wrapper:
SCREAMING_SNAKE_CASE__ : List[Any] = getattr(lowercase__ , 'forward' )
SCREAMING_SNAKE_CASE__ : str = model.__dict__.pop('_original_forward' , lowercase__ )
if original_forward is not None:
while hasattr(lowercase__ , '__wrapped__' ):
SCREAMING_SNAKE_CASE__ : Dict = forward.__wrapped__
if forward == original_forward:
break
SCREAMING_SNAKE_CASE__ : Dict = forward
if getattr(lowercase__ , '_converted_to_transformer_engine' , lowercase__ ):
convert_model(lowercase__ , to_transformer_engine=lowercase__ )
if is_compiled:
SCREAMING_SNAKE_CASE__ : List[Any] = model
SCREAMING_SNAKE_CASE__ : Optional[Any] = compiled_model
return model
def _a ( ):
'''simple docstring'''
PartialState().wait_for_everyone()
def _a ( lowercase__ : str , lowercase__ : Optional[Any] ):
'''simple docstring'''
if PartialState().distributed_type == DistributedType.TPU:
xm.save(lowercase__ , lowercase__ )
elif PartialState().local_process_index == 0:
torch.save(lowercase__ , lowercase__ )
@contextmanager
def _a ( **lowercase__ : str ):
'''simple docstring'''
for key, value in kwargs.items():
SCREAMING_SNAKE_CASE__ : int = str(lowercase__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _a ( lowercase__ : Optional[Any] ):
'''simple docstring'''
if not hasattr(lowercase__ , '__qualname__' ) and not hasattr(lowercase__ , '__name__' ):
SCREAMING_SNAKE_CASE__ : Any = getattr(lowercase__ , '__class__' , lowercase__ )
if hasattr(lowercase__ , '__qualname__' ):
return obj.__qualname__
if hasattr(lowercase__ , '__name__' ):
return obj.__name__
return str(lowercase__ )
def _a ( lowercase__ : List[str] , lowercase__ : List[Any] ):
'''simple docstring'''
for key, value in source.items():
if isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE__ : List[str] = destination.setdefault(lowercase__ , {} )
merge_dicts(lowercase__ , lowercase__ )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = value
return destination
def _a ( lowercase__ : int = None ):
'''simple docstring'''
if port is None:
SCREAMING_SNAKE_CASE__ : int = 2_95_00
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
| 636 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
SCREAMING_SNAKE_CASE__ : Any = True if 'large' in model_name or 'huge' in model_name else False
SCREAMING_SNAKE_CASE__ : List[Any] = True if 'large' in model_name or 'huge' in model_name else False
SCREAMING_SNAKE_CASE__ : Any = True if 'large' in model_name or 'huge' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
SCREAMING_SNAKE_CASE__ : int = [3, 3, 3, 3]
SCREAMING_SNAKE_CASE__ : Optional[int] = [5, 5, 5, 5]
elif "fl4" in model_name:
SCREAMING_SNAKE_CASE__ : Optional[int] = [4, 4, 4, 4]
SCREAMING_SNAKE_CASE__ : Tuple = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
SCREAMING_SNAKE_CASE__ : List[str] = [3, 3, 3, 3]
if "lrf" in model_name:
SCREAMING_SNAKE_CASE__ : Dict = [3, 3, 3, 3]
else:
SCREAMING_SNAKE_CASE__ : Tuple = [2, 2, 2, 2]
if "tiny" in model_name:
SCREAMING_SNAKE_CASE__ : Tuple = 96
elif "small" in model_name:
SCREAMING_SNAKE_CASE__ : Tuple = 96
elif "base" in model_name:
SCREAMING_SNAKE_CASE__ : Any = 1_28
elif "large" in model_name:
SCREAMING_SNAKE_CASE__ : List[Any] = 1_92
elif "xlarge" in model_name:
SCREAMING_SNAKE_CASE__ : str = 2_56
elif "huge" in model_name:
SCREAMING_SNAKE_CASE__ : Dict = 3_52
# set label information
SCREAMING_SNAKE_CASE__ : Dict = 'huggingface/label-files'
if "large" in model_name or "huge" in model_name:
SCREAMING_SNAKE_CASE__ : str = 'imagenet-22k-id2label.json'
else:
SCREAMING_SNAKE_CASE__ : str = 'imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE__ : Union[str, Any] = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE__ : Tuple = {int(lowercase__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Dict = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : List[Any] = FocalNetConfig(
embed_dim=lowercase__ , depths=lowercase__ , focal_levels=lowercase__ , focal_windows=lowercase__ , use_conv_embed=lowercase__ , idalabel=lowercase__ , labelaid=lowercase__ , use_post_layernorm=lowercase__ , use_layerscale=lowercase__ , )
return config
def _a ( lowercase__ : List[Any] ):
'''simple docstring'''
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE__ : List[str] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
SCREAMING_SNAKE_CASE__ : List[str] = 'encoder.' + name
if "encoder.layers" in name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = name.replace('encoder.layers' , 'encoder.stages' )
if "downsample.proj" in name:
SCREAMING_SNAKE_CASE__ : List[Any] = name.replace('downsample.proj' , 'downsample.projection' )
if "blocks" in name:
SCREAMING_SNAKE_CASE__ : Tuple = name.replace('blocks' , 'layers' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
SCREAMING_SNAKE_CASE__ : List[str] = name.replace('modulation.f' , 'modulation.projection_in' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
SCREAMING_SNAKE_CASE__ : Dict = name.replace('modulation.h' , 'modulation.projection_context' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = name.replace('modulation.proj' , 'modulation.projection_out' )
if name == "norm.weight":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'layernorm.weight'
if name == "norm.bias":
SCREAMING_SNAKE_CASE__ : int = 'layernorm.bias'
if "head" in name:
SCREAMING_SNAKE_CASE__ : int = name.replace('head' , 'classifier' )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'focalnet.' + name
return name
def _a ( lowercase__ : Optional[int] , lowercase__ : Union[str, Any] , lowercase__ : Any=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
SCREAMING_SNAKE_CASE__ : List[str] = model_name_to_url[model_name]
print('Checkpoint URL: ' , lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.hub.load_state_dict_from_url(lowercase__ , map_location='cpu' )['model']
# rename keys
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dict.pop(lowercase__ )
SCREAMING_SNAKE_CASE__ : List[str] = val
SCREAMING_SNAKE_CASE__ : Optional[int] = get_focalnet_config(lowercase__ )
SCREAMING_SNAKE_CASE__ : Any = FocalNetForImageClassification(lowercase__ )
model.eval()
# load state dict
model.load_state_dict(lowercase__ )
# verify conversion
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
SCREAMING_SNAKE_CASE__ : int = BitImageProcessor(
do_resize=lowercase__ , size={'shortest_edge': 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=lowercase__ , crop_size=2_24 , do_normalize=lowercase__ , image_mean=lowercase__ , image_std=lowercase__ , )
SCREAMING_SNAKE_CASE__ : Tuple = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
SCREAMING_SNAKE_CASE__ : Any = processor(images=lowercase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
SCREAMING_SNAKE_CASE__ : int = image_transforms(lowercase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , lowercase__ , atol=1E-4 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**lowercase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = outputs.logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
print('First values of logits:' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
SCREAMING_SNAKE_CASE__ : Any = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
SCREAMING_SNAKE_CASE__ : int = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
SCREAMING_SNAKE_CASE__ : int = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase__ )
processor.save_pretrained(lowercase__ )
if push_to_hub:
print(f'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(f'''{model_name}''' )
processor.push_to_hub(f'''{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 636 |
from __future__ import annotations
def _a ( lowercase__ : list[int | float] , lowercase__ : int , lowercase__ : int ):
'''simple docstring'''
if len(lowercase__ ) == 0:
raise ValueError('find_max() arg is an empty sequence' )
if (
left >= len(lowercase__ )
or left < -len(lowercase__ )
or right >= len(lowercase__ )
or right < -len(lowercase__ )
):
raise IndexError('list index out of range' )
if left == right:
return nums[left]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (left + right) >> 1 # the middle
SCREAMING_SNAKE_CASE__ : int = find_max(lowercase__ , lowercase__ , lowercase__ ) # find max in range[left, mid]
SCREAMING_SNAKE_CASE__ : Tuple = find_max(lowercase__ , mid + 1 , lowercase__ ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 636 | 1 |
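For reference, a small usage sketch of the divide-and-conquer maximum search above. It assumes the function is bound to the name `find_max` that its own recursive calls refer to (the sample shows it under a renamed identifier); the input list is illustrative.

```python
# Hypothetical usage sketch: search the full index range of an example list.
values = [1, 9, 4, 7]
print(find_max(values, 0, len(values) - 1))  # 9
print(find_max(values, -len(values), -1))    # 9 (negative indices pass the bounds check)
```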
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = PhobertTokenizer
lowercase_ = False
def __lowercase( self : Optional[Any] )-> List[str]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE__ : Optional[int] = ['T@@', 'i', 'I', 'R@@', 'r', 'e@@']
SCREAMING_SNAKE_CASE__ : Tuple = dict(zip(a_ , range(len(a_ ) ) ) )
SCREAMING_SNAKE_CASE__ : Any = ['#version: 0.2', 'l à</w>']
SCREAMING_SNAKE_CASE__ : Optional[Any] = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
for token in vocab_tokens:
fp.write(F'''{token} {vocab_tokens[token]}\n''' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(a_ ) )
def __lowercase( self : str , **a_ : Any )-> Tuple:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **a_ )
def __lowercase( self : List[str] , a_ : Any )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'Tôi là VinAI Research'
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'
return input_text, output_text
def __lowercase( self : int )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE__ : List[Any] = 'Tôi là VinAI Research'
SCREAMING_SNAKE_CASE__ : Tuple = 'T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'.split()
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.tokenize(a_ )
print(a_ )
self.assertListEqual(a_ , a_ )
SCREAMING_SNAKE_CASE__ : List[str] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ : Tuple = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , a_ )
| 636 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def _a ( lowercase__ : Any ):
'''simple docstring'''
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def _a ( lowercase__ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = create_tensor(lowercase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = gather(lowercase__ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def _a ( lowercase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = [state.process_index]
SCREAMING_SNAKE_CASE__ : Any = gather_object(lowercase__ )
assert len(lowercase__ ) == state.num_processes, f'''{gathered_obj}, {len(lowercase__ )} != {state.num_processes}'''
assert gathered_obj == list(range(state.num_processes ) ), f'''{gathered_obj} != {list(range(state.num_processes ) )}'''
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = create_tensor(lowercase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = broadcast(lowercase__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def _a ( lowercase__ : int ):
'''simple docstring'''
if state.is_main_process:
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.arange(state.num_processes + 1 ).to(state.device )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = torch.arange(state.num_processes ).to(state.device )
SCREAMING_SNAKE_CASE__ : Any = pad_across_processes(lowercase__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def _a ( lowercase__ : Optional[Any] ):
'''simple docstring'''
if state.num_processes != 2:
return
SCREAMING_SNAKE_CASE__ : List[Any] = create_tensor(lowercase__ )
SCREAMING_SNAKE_CASE__ : str = reduce(lowercase__ , 'sum' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), f'''{reduced_tensor} != {truth_tensor}'''
def _a ( lowercase__ : int ):
'''simple docstring'''
if state.num_processes != 2:
return
SCREAMING_SNAKE_CASE__ : Any = create_tensor(lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = reduce(lowercase__ , 'mean' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), f'''{reduced_tensor} != {truth_tensor}'''
def _a ( lowercase__ : int ):
'''simple docstring'''
main()
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = PartialState()
state.print(f'''State: {state}''' )
state.print('testing gather' )
test_gather(lowercase__ )
state.print('testing gather_object' )
test_gather_object(lowercase__ )
state.print('testing broadcast' )
test_broadcast(lowercase__ )
state.print('testing pad_across_processes' )
test_pad_across_processes(lowercase__ )
state.print('testing reduce_sum' )
test_reduce_sum(lowercase__ )
state.print('testing reduce_mean' )
test_reduce_mean(lowercase__ )
if __name__ == "__main__":
main()
| 636 | 1 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
def _a ( lowercase__ : List[Any] , lowercase__ : Any , lowercase__ : Union[str, Any] , lowercase__ : int=None , lowercase__ : int=None ):
'''simple docstring'''
if "." in tensor_name:
SCREAMING_SNAKE_CASE__ : Tuple = tensor_name.split('.' )
for split in splits[:-1]:
SCREAMING_SNAKE_CASE__ : List[str] = getattr(lowercase__ , lowercase__ )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
SCREAMING_SNAKE_CASE__ : List[Any] = new_module
SCREAMING_SNAKE_CASE__ : Optional[Any] = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f'''{module} does not have a parameter or a buffer named {tensor_name}.''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = tensor_name in module._buffers
SCREAMING_SNAKE_CASE__ : Tuple = getattr(lowercase__ , lowercase__ )
if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
raise ValueError(f'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' )
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : Dict = False
if is_buffer or not is_bitsandbytes_available():
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : Optional[int] = False
else:
SCREAMING_SNAKE_CASE__ : Any = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
SCREAMING_SNAKE_CASE__ : str = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
SCREAMING_SNAKE_CASE__ : str = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = old_value.to(lowercase__ )
elif isinstance(lowercase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE__ : List[Any] = value.to('cpu' )
if value.dtype == torch.inta:
SCREAMING_SNAKE_CASE__ : str = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
'0.37.2' )
if not is_abit_serializable:
raise ValueError(
'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor(lowercase__ , device='cpu' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , lowercase__ ) and fpaa_statistics is None:
SCREAMING_SNAKE_CASE__ : int = new_value.T
SCREAMING_SNAKE_CASE__ : Optional[Any] = old_value.__dict__
if is_abit:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bnb.nn.IntaParams(lowercase__ , requires_grad=lowercase__ , **lowercase__ ).to(lowercase__ )
elif is_abit:
SCREAMING_SNAKE_CASE__ : Optional[Any] = bnb.nn.Paramsabit(lowercase__ , requires_grad=lowercase__ , **lowercase__ ).to(lowercase__ )
SCREAMING_SNAKE_CASE__ : Dict = new_value
if fpaa_statistics is not None:
setattr(module.weight , 'SCB' , fpaa_statistics.to(lowercase__ ) )
else:
if value is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = old_value.to(lowercase__ )
elif isinstance(lowercase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE__ : Any = value.to(lowercase__ )
else:
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor(lowercase__ , device=lowercase__ )
if is_buffer:
SCREAMING_SNAKE_CASE__ : Any = new_value
else:
SCREAMING_SNAKE_CASE__ : Tuple = nn.Parameter(lowercase__ , requires_grad=old_value.requires_grad )
SCREAMING_SNAKE_CASE__ : List[str] = new_value
def _a ( lowercase__ : str , lowercase__ : List[str]=None , lowercase__ : str=None , lowercase__ : Optional[Any]=None , lowercase__ : Union[str, Any]=False ):
'''simple docstring'''
for name, module in model.named_children():
if current_key_name is None:
SCREAMING_SNAKE_CASE__ : List[Any] = []
current_key_name.append(lowercase__ )
if (isinstance(lowercase__ , nn.Linear ) or isinstance(lowercase__ , lowercase__ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '.'.join(lowercase__ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = module.weight.shape
else:
SCREAMING_SNAKE_CASE__ : Any = module.in_features
SCREAMING_SNAKE_CASE__ : int = module.out_features
if quantization_config.quantization_method() == "llm_int8":
SCREAMING_SNAKE_CASE__ : str = bnb.nn.LinearabitLt(
lowercase__ , lowercase__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
SCREAMING_SNAKE_CASE__ : Optional[int] = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = bnb.nn.Linearabit(
lowercase__ , lowercase__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
SCREAMING_SNAKE_CASE__ : List[str] = True
# Store the module class in case we need to transpose the weight later
SCREAMING_SNAKE_CASE__ : str = type(lowercase__ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(lowercase__ )
if len(list(module.children() ) ) > 0:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = _replace_with_bnb_linear(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , has_been_replaced=lowercase__ , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _a ( lowercase__ : Dict , lowercase__ : List[str]=None , lowercase__ : Tuple=None , lowercase__ : Any=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = _replace_with_bnb_linear(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def _a ( *lowercase__ : int , **lowercase__ : Union[str, Any] ):
'''simple docstring'''
warnings.warn(
'`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , lowercase__ , )
return replace_with_bnb_linear(*lowercase__ , **lowercase__ )
def _a ( *lowercase__ : Union[str, Any] , **lowercase__ : Union[str, Any] ):
'''simple docstring'''
warnings.warn(
'`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , lowercase__ , )
return set_module_quantized_tensor_to_device(*lowercase__ , **lowercase__ )
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = deepcopy(lowercase__ ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
SCREAMING_SNAKE_CASE__ : Dict = find_tied_parameters(lowercase__ )
# For compatibility with Accelerate < 0.18
if isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
SCREAMING_SNAKE_CASE__ : Dict = sum(lowercase__ , [] )
SCREAMING_SNAKE_CASE__ : Any = len(lowercase__ ) > 0
# Check if it is a base model
SCREAMING_SNAKE_CASE__ : str = not hasattr(lowercase__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
SCREAMING_SNAKE_CASE__ : int = list(model.named_children() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [list_modules[-1][0]]
# add last module together with tied weights
SCREAMING_SNAKE_CASE__ : List[Any] = set(lowercase__ ) - set(lowercase__ )
SCREAMING_SNAKE_CASE__ : int = list(set(lowercase__ ) ) + list(lowercase__ )
# remove ".weight" from the keys
SCREAMING_SNAKE_CASE__ : str = ['.weight', '.bias']
SCREAMING_SNAKE_CASE__ : str = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace(lowercase__ , '' )
filtered_module_names.append(lowercase__ )
return filtered_module_names
| 636 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
SCREAMING_SNAKE_CASE__ : Any = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class snake_case ( unittest.TestCase ):
def __init__( self : List[Any] , a_ : Optional[int] , a_ : Dict=7 , a_ : Any=3 , a_ : Any=18 , a_ : int=30 , a_ : int=400 , a_ : List[Any]=None , a_ : int=True , a_ : int=True , a_ : Dict=None , )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else {'height': 20, 'width': 20}
SCREAMING_SNAKE_CASE__ : str = parent
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : Any = num_channels
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_size
SCREAMING_SNAKE_CASE__ : List[str] = min_resolution
SCREAMING_SNAKE_CASE__ : Dict = max_resolution
SCREAMING_SNAKE_CASE__ : List[Any] = size
SCREAMING_SNAKE_CASE__ : Tuple = do_normalize
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_convert_rgb
SCREAMING_SNAKE_CASE__ : List[str] = [512, 1024, 2048, 4096]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = patch_size if patch_size is not None else {'height': 16, 'width': 16}
def __lowercase( self : Optional[Any] )-> str:
"""simple docstring"""
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __lowercase( self : Dict )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(a_ , stream=a_ ).raw ).convert('RGB' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = PixaStructImageProcessor if is_vision_available() else None
def __lowercase( self : List[str] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = PixaStructImageProcessingTester(self )
@property
def __lowercase( self : Dict )-> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , 'do_normalize' ) )
self.assertTrue(hasattr(a_ , 'do_convert_rgb' ) )
def __lowercase( self : List[Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.image_processor_tester.prepare_dummy_image()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE__ : List[Any] = 2048
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(a_ , return_tensors='pt' , max_patches=a_ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : str = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processor(
a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowercase( self : Any )-> Any:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : str = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
SCREAMING_SNAKE_CASE__ : int = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(a_ ):
SCREAMING_SNAKE_CASE__ : Dict = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
SCREAMING_SNAKE_CASE__ : List[Any] = 'Hello'
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ , header_text=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processor(
a_ , return_tensors='pt' , max_patches=a_ , header_text=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowercase( self : List[Any] )-> Dict:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
SCREAMING_SNAKE_CASE__ : str = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE__ : str = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processor(
a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Any = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(
a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = PixaStructImageProcessor if is_vision_available() else None
def __lowercase( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = PixaStructImageProcessingTester(self , num_channels=4 )
SCREAMING_SNAKE_CASE__ : Dict = 3
@property
def __lowercase( self : Any )-> Any:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase( self : Dict )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , 'do_normalize' ) )
self.assertTrue(hasattr(a_ , 'do_convert_rgb' ) )
def __lowercase( self : str )-> Union[str, Any]:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Dict = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processor(
a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 636 | 1 |
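As a quick arithmetic check of the flattened-patch width asserted throughout the tests above (the `expected_hidden_dim` computation), using the tester defaults of 16x16 patches and 3 channels; the variable names below are illustrative, and the two extra entries correspond to the row/column coordinates Pix2Struct prepends to each flattened patch.

```python
# Each flattened patch carries patch_height * patch_width * num_channels pixel
# values plus 2 extra position entries, hence the "+ 2" in the tests above.
patch_height, patch_width, num_channels = 16, 16, 3
expected_hidden_dim = patch_height * patch_width * num_channels + 2
print(expected_hidden_dim)  # 770
```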
def _a ( lowercase__ : int , lowercase__ : int ):
'''simple docstring'''
return int((input_a, input_a).count(0 ) != 0 )
def _a ( ):
'''simple docstring'''
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 636 |
import heapq as hq
import math
from collections.abc import Iterator
class snake_case :
def __init__( self : str , a_ : str )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = str(id_ )
SCREAMING_SNAKE_CASE__ : Any = None
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {} # {vertex:distance}
def __lt__( self : int , a_ : Tuple )-> Union[str, Any]:
"""simple docstring"""
return self.key < other.key
def __repr__( self : Any )-> Dict:
"""simple docstring"""
return self.id
def __lowercase( self : Optional[Any] , a_ : int )-> List[str]:
"""simple docstring"""
self.neighbors.append(a_ )
def __lowercase( self : int , a_ : int , a_ : Optional[Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = weight
def _a ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : Dict ):
'''simple docstring'''
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , lowercase__ )
graph[b - 1].add_edge(graph[a - 1] , lowercase__ )
def _a ( lowercase__ : list , lowercase__ : Vertex ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = []
for u in graph:
SCREAMING_SNAKE_CASE__ : Dict = math.inf
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : List[str] = 0
SCREAMING_SNAKE_CASE__ : int = graph[:]
while q:
SCREAMING_SNAKE_CASE__ : Optional[Any] = min(lowercase__ )
q.remove(lowercase__ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
SCREAMING_SNAKE_CASE__ : int = u
SCREAMING_SNAKE_CASE__ : Any = u.edges[v.id]
for i in range(1 , len(lowercase__ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def _a ( lowercase__ : list , lowercase__ : Vertex ):
'''simple docstring'''
for u in graph:
SCREAMING_SNAKE_CASE__ : List[str] = math.inf
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
SCREAMING_SNAKE_CASE__ : Tuple = list(lowercase__ )
hq.heapify(lowercase__ )
while h:
SCREAMING_SNAKE_CASE__ : Optional[int] = hq.heappop(lowercase__ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
SCREAMING_SNAKE_CASE__ : List[str] = u
SCREAMING_SNAKE_CASE__ : Dict = u.edges[v.id]
hq.heapify(lowercase__ )
for i in range(1 , len(lowercase__ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def _a ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 636 | 1 |
def _a ( lowercase__ : list ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = False
while is_sorted is False: # Until all the indices are traversed keep looping
SCREAMING_SNAKE_CASE__ : List[Any] = True
for i in range(0 , len(lowercase__ ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = input_list[i + 1], input_list[i]
# swapping if elements not in order
SCREAMING_SNAKE_CASE__ : int = False
for i in range(1 , len(lowercase__ ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
return input_list
if __name__ == "__main__":
print("Enter list to be sorted")
SCREAMING_SNAKE_CASE__ : List[str] = [int(x) for x in input().split()]
# inputing elements of the list in one line
SCREAMING_SNAKE_CASE__ : Dict = odd_even_sort(input_list)
print("The sorted list is")
print(sorted_list)
| 636 |
def _a ( lowercase__ : int , lowercase__ : int ):
'''simple docstring'''
return int((input_a, input_a).count(0 ) != 0 )
def _a ( ):
'''simple docstring'''
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 636 | 1 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def _a ( lowercase__ : Optional[int] , lowercase__ : Optional[int] , lowercase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = RemBertConfig.from_json_file(lowercase__ )
print('Building PyTorch model from configuration: {}'.format(str(lowercase__ ) ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = RemBertModel(lowercase__ )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowercase__ , lowercase__ , lowercase__ )
# Save pytorch-model
print('Save PyTorch model to {}'.format(lowercase__ ) )
torch.save(model.state_dict() , lowercase__ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
SCREAMING_SNAKE_CASE__ : Dict = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 636 |
from math import factorial, radians
def _a ( lowercase__ : float , lowercase__ : int = 18 , lowercase__ : int = 10 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
SCREAMING_SNAKE_CASE__ : int = radians(lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = angle_in_radians
SCREAMING_SNAKE_CASE__ : Optional[int] = 3
SCREAMING_SNAKE_CASE__ : Optional[int] = -1
for _ in range(lowercase__ ):
result += (b * (angle_in_radians**a)) / factorial(lowercase__ )
SCREAMING_SNAKE_CASE__ : Any = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowercase__ , lowercase__ )
if __name__ == "__main__":
__import__("doctest").testmod()
| 636 | 1 |
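For reference, a small self-contained sanity check of the truncated Maclaurin series the sample above evaluates (sin(x) = x - x^3/3! + x^5/5! - ...); the term count and test angle below are illustrative.

```python
from math import factorial, radians

# Evaluate the first 18 terms of the Maclaurin series for sin at 30 degrees.
x = radians(30)
approx = sum((-1) ** n * x ** (2 * n + 1) / factorial(2 * n + 1) for n in range(18))
print(round(approx, 10))  # 0.5
```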
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None
SCREAMING_SNAKE_CASE__ : Dict = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
SCREAMING_SNAKE_CASE__ : Dict = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class snake_case :
lowercase_ = True
lowercase_ = None
# Automatically constructed
lowercase_ = "PIL.Image.Image"
lowercase_ = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
lowercase_ = field(default='Image' , init=UpperCamelCase_ , repr=UpperCamelCase_ )
def __call__( self : Optional[int] )-> List[str]:
"""simple docstring"""
return self.pa_type
def __lowercase( self : Tuple , a_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] )-> dict:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if isinstance(a_ , a_ ):
SCREAMING_SNAKE_CASE__ : Any = np.array(a_ )
if isinstance(a_ , a_ ):
return {"path": value, "bytes": None}
elif isinstance(a_ , a_ ):
return {"path": None, "bytes": value}
elif isinstance(a_ , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(a_ )
elif isinstance(a_ , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(a_ )
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
F'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def __lowercase( self : str , a_ : dict , a_ : Dict=None )-> "PIL.Image.Image":
"""simple docstring"""
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support decoding images, please install \'Pillow\'.' )
if token_per_repo_id is None:
SCREAMING_SNAKE_CASE__ : Any = {}
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = value['path'], value['bytes']
if bytes_ is None:
if path is None:
raise ValueError(F'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(a_ ):
SCREAMING_SNAKE_CASE__ : List[Any] = PIL.Image.open(a_ )
else:
SCREAMING_SNAKE_CASE__ : int = path.split('::' )[-1]
try:
SCREAMING_SNAKE_CASE__ : Optional[Any] = string_to_dict(a_ , config.HUB_DATASETS_URL )['repo_id']
SCREAMING_SNAKE_CASE__ : int = token_per_repo_id.get(a_ )
except ValueError:
SCREAMING_SNAKE_CASE__ : str = None
with xopen(a_ , 'rb' , use_auth_token=a_ ) as f:
SCREAMING_SNAKE_CASE__ : Optional[int] = BytesIO(f.read() )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = PIL.Image.open(bytes_ )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def __lowercase( self : Tuple )-> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('binary' ),
"path": Value('string' ),
}
)
def __lowercase( self : Optional[int] , a_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] )-> pa.StructArray:
"""simple docstring"""
if pa.types.is_string(storage.type ):
SCREAMING_SNAKE_CASE__ : Tuple = pa.array([None] * len(a_ ) , type=pa.binary() )
SCREAMING_SNAKE_CASE__ : Any = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pa.array([None] * len(a_ ) , type=pa.string() )
SCREAMING_SNAKE_CASE__ : List[Any] = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
SCREAMING_SNAKE_CASE__ : List[Any] = storage.field('bytes' )
else:
SCREAMING_SNAKE_CASE__ : Any = pa.array([None] * len(a_ ) , type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
SCREAMING_SNAKE_CASE__ : Optional[int] = storage.field('path' )
else:
SCREAMING_SNAKE_CASE__ : int = pa.array([None] * len(a_ ) , type=pa.string() )
SCREAMING_SNAKE_CASE__ : Optional[int] = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
SCREAMING_SNAKE_CASE__ : Optional[int] = pa.array(
[encode_np_array(np.array(a_ ) )['bytes'] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pa.array([None] * len(a_ ) , type=pa.string() )
SCREAMING_SNAKE_CASE__ : Any = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(a_ , self.pa_type )
def __lowercase( self : Any , a_ : pa.StructArray )-> pa.StructArray:
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(a_ : Any ):
with xopen(a_ , 'rb' ) as f:
SCREAMING_SNAKE_CASE__ : Tuple = f.read()
return bytes_
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
SCREAMING_SNAKE_CASE__ : List[Any] = pa.array(
[os.path.basename(a_ ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , )
SCREAMING_SNAKE_CASE__ : List[str] = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(a_ , self.pa_type )
def list_image_compression_formats() -> List[str]:
    """List the image formats that Pillow can both open and save (cached at module level)."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes, keeping the native format when possible, else PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    """Encode a PIL image either by its local path (if it has one) or by its bytes."""
    if hasattr(image, 'filename') and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    """Encode a numpy array as an image dict, downcasting the dtype to one Pillow supports if needed."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('|u1')
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.")
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            if np.dtype(dtype_str) in _VALID_IMAGE_ARRAY_DTPYES:
                dest_dtype = np.dtype(dtype_str)
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}")
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[dict]:
    """Encode a list of image objects (paths, arrays or PIL images) as image dicts."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
| 636 | import math
def is_prime(number: int) -> bool:
    """Trial-division primality test over the odd candidates up to sqrt(number)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)
def next_prime(value, factor=1, **kwargs):
    """Step from factor * value to a nearby prime (upward by default, downward when desc=True)."""
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
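# Usage sketch for the helpers above (values worked out by hand, not part of the original file):
#   is_prime(13)   -> True
#   next_prime(14) -> 17   (14, 15 and 16 are composite, so the search stops at 17)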
| 636 | 1 |
def _print_dist(dist: list, v: int):
    """Pretty-print the all-pairs shortest path matrix, using INF for unreachable pairs."""
    print('\nThe shortest path matrix using Floyd Warshall algorithm\n')
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float('inf'):
                print(int(dist[i][j]), end='\t')
            else:
                print('INF', end='\t')
        print()
def floyd_warshall(graph: list, v: int):
    """Run Floyd-Warshall on an adjacency matrix of v vertices and return (dist, v)."""
    dist = [[float('inf') for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float('inf')
                    and dist[k][j] != float('inf')
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v
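# A minimal programmatic sketch (hand-checked, not from the original file):
#   INF = float("inf")
#   dist, _ = floyd_warshall([[0, 2, INF], [INF, 0, 3], [INF, INF, 0]], 3)
#   # dist[0][2] ends up as 5, i.e. the relaxed path 0 -> 1 -> 2.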
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))
    graph = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 636 | import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class snake_case :
def __init__( self : str , a_ : List[str] , a_ : Tuple=13 , a_ : Dict=30 , a_ : Optional[int]=2 , a_ : Tuple=3 , a_ : Dict=True , a_ : int=True , a_ : Optional[Any]=32 , a_ : List[str]=5 , a_ : Any=4 , a_ : Dict=37 , a_ : Dict="gelu" , a_ : int=0.1 , a_ : Optional[Any]=0.1 , a_ : Any=10 , a_ : List[str]=0.02 , a_ : Any=3 , a_ : List[str]=None , a_ : Optional[int]=2 , )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = parent
SCREAMING_SNAKE_CASE__ : int = batch_size
SCREAMING_SNAKE_CASE__ : int = image_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = num_channels
SCREAMING_SNAKE_CASE__ : int = is_training
SCREAMING_SNAKE_CASE__ : List[Any] = use_labels
SCREAMING_SNAKE_CASE__ : str = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : str = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = scope
SCREAMING_SNAKE_CASE__ : str = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
SCREAMING_SNAKE_CASE__ : Optional[int] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_patches + 2
def __lowercase( self : Optional[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __lowercase( self : Optional[Any] )-> Tuple:
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowercase( self : List[str] , a_ : List[str] , a_ : Optional[Any] , a_ : str )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = DeiTModel(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase( self : List[Any] , a_ : List[str] , a_ : List[str] , a_ : List[Any] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = DeiTForMaskedImageModeling(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ : Optional[int] = 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DeiTForMaskedImageModeling(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : int = model(a_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowercase( self : List[str] , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Tuple )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Tuple = DeiTForImageClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ : Any = 1
SCREAMING_SNAKE_CASE__ : int = DeiTForImageClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowercase( self : int )-> int:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowercase_ = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowercase_ = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
def __lowercase( self : List[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = DeiTModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def __lowercase( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def __lowercase( self : List[Any] )-> Dict:
"""simple docstring"""
pass
def __lowercase( self : str )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , nn.Linear ) )
def __lowercase( self : str )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[str] = model_class(a_ )
SCREAMING_SNAKE_CASE__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : int = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a_ )
def __lowercase( self : List[Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def __lowercase( self : List[Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a_ )
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
def __lowercase( self : str , a_ : str , a_ : Tuple , a_ : Union[str, Any]=False )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __lowercase( self : Optional[Any] )-> Any:
"""simple docstring"""
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(a_ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE__ : Tuple = model_class(a_ )
model.to(a_ )
model.train()
SCREAMING_SNAKE_CASE__ : List[Any] = self._prepare_for_class(a_ , a_ , return_labels=a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**a_ ).loss
loss.backward()
def __lowercase( self : Optional[int] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : Tuple = True
for model_class in self.all_model_classes:
if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ )
model.gradient_checkpointing_enable()
model.to(a_ )
model.train()
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(**a_ ).loss
loss.backward()
def __lowercase( self : Optional[Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[str] = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(a_ ),
*get_values(a_ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ):
SCREAMING_SNAKE_CASE__ : int = problem_type['title']
SCREAMING_SNAKE_CASE__ : Tuple = problem_type['num_labels']
SCREAMING_SNAKE_CASE__ : str = model_class(a_ )
model.to(a_ )
model.train()
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
SCREAMING_SNAKE_CASE__ : Any = inputs['labels'].to(problem_type['dtype'] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong in the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=a_ ) as warning_list:
SCREAMING_SNAKE_CASE__ : str = model(**a_ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def __lowercase( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = DeiTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def prepare_img():
    """Load the COCO sample image used by the integration tests."""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def __lowercase( self : int )-> Dict:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def __lowercase( self : Any )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to(
a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(images=a_ , return_tensors='pt' ).to(a_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(**a_ )
# verify the logits
SCREAMING_SNAKE_CASE__ : int = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __lowercase( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = DeiTModel.from_pretrained(
'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto' )
SCREAMING_SNAKE_CASE__ : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(images=a_ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : str = inputs.pixel_values.to(a_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ )
| 636 | 1 |
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Levenshtein (edit) distance between word1 and word2, computed top-down with memoization."""
    len_word1 = len(word1)
    len_word2 = len(word2)
    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2), 1 + min_distance(index1, index2 + 1), diff + min_distance(index1 + 1, index2 + 1), )
    return min_distance(0, 0)
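# Worked example for the helper above (the classic textbook pair, checked by hand):
#   min_distance_up_bottom("intention", "execution") -> 5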
if __name__ == "__main__":
import doctest
doctest.testmod()
| 636 | import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
def __init__( self : List[Any] , a_ : Dict , a_ : Any=13 , a_ : Any=7 , a_ : Tuple=True , a_ : Tuple=True , a_ : Optional[int]=False , a_ : Dict=True , a_ : Optional[Any]=99 , a_ : Any=32 , a_ : Dict=5 , a_ : Tuple=4 , a_ : List[str]=37 , a_ : Union[str, Any]="gelu" , a_ : Dict=0.1 , a_ : Tuple=0.1 , a_ : List[str]=512 , a_ : List[str]=16 , a_ : List[str]=2 , a_ : Optional[int]=0.02 , a_ : List[str]=3 , a_ : Union[str, Any]=4 , a_ : Optional[Any]=None , )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = parent
SCREAMING_SNAKE_CASE__ : Dict = batch_size
SCREAMING_SNAKE_CASE__ : Dict = seq_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_input_mask
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE__ : int = use_labels
SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Dict = intermediate_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Tuple = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = num_labels
SCREAMING_SNAKE_CASE__ : Dict = num_choices
SCREAMING_SNAKE_CASE__ : str = scope
def __lowercase( self : Tuple )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Tuple = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : str = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase( self : Dict )-> Tuple:
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a_ , initializer_range=self.initializer_range , )
def __lowercase( self : Any , a_ : str , a_ : Tuple , a_ : Dict , a_ : Optional[int] , a_ : List[Any] , a_ : Union[str, Any] , a_ : Tuple )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BioGptModel(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , attention_mask=a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase( self : List[Any] , a_ : Union[str, Any] , a_ : Optional[int] , a_ : Tuple , a_ : Optional[Any] , a_ : int , a_ : Optional[int] , a_ : int , a_ : str , a_ : Optional[Any] , )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = BioGptForCausalLM(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase( self : Tuple , a_ : Optional[int] , a_ : Union[str, Any] , a_ : Any , a_ : Any , a_ : Optional[int] , *a_ : Tuple )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = BioGptModel(config=a_ )
model.to(a_ )
model.eval()
# create attention mask
SCREAMING_SNAKE_CASE__ : Any = torch.ones(input_ids.shape , dtype=torch.long , device=a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.seq_length // 2
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
# first forward pass
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , attention_mask=a_ ).to_tuple()
        # create hypothetical next token and extend to next_input_ids
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
SCREAMING_SNAKE_CASE__ : str = ids_tensor((1,) , a_ ).item() + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = random_other_next_tokens
# append to next input_ids and attn_mask
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Dict = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=a_ )] , dim=1 , )
# get two different outputs
SCREAMING_SNAKE_CASE__ : str = model(a_ , attention_mask=a_ )['last_hidden_state']
SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , past_key_values=a_ , attention_mask=a_ )['last_hidden_state']
# select random slice
SCREAMING_SNAKE_CASE__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : List[str] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1e-3 ) )
def __lowercase( self : str , a_ : List[Any] , a_ : str , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Optional[Any] , *a_ : List[str] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = BioGptModel(config=a_ ).to(a_ ).eval()
SCREAMING_SNAKE_CASE__ : Dict = torch.ones(input_ids.shape , dtype=torch.long , device=a_ )
# first forward pass
SCREAMING_SNAKE_CASE__ : Any = model(a_ , attention_mask=a_ , use_cache=a_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attention mask
SCREAMING_SNAKE_CASE__ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
SCREAMING_SNAKE_CASE__ : int = model(a_ , attention_mask=a_ )['last_hidden_state']
SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , attention_mask=a_ , past_key_values=a_ )[
'last_hidden_state'
]
# select random slice
SCREAMING_SNAKE_CASE__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1e-3 ) )
def __lowercase( self : Any , a_ : List[str] , a_ : Optional[int] , a_ : Any , a_ : Tuple , a_ : Any , *a_ : List[Any] , a_ : Union[str, Any]=False )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = BioGptForCausalLM(a_ )
model.to(a_ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
SCREAMING_SNAKE_CASE__ : Tuple = model(a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def __lowercase( self : Union[str, Any] , a_ : List[str] , *a_ : Optional[int] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = BioGptModel(a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def __lowercase( self : Dict , a_ : Tuple , a_ : Tuple , a_ : List[str] , a_ : Any , a_ : str , *a_ : str )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.num_labels
SCREAMING_SNAKE_CASE__ : str = BioGptForTokenClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ , attention_mask=a_ , token_type_ids=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowercase( self : Any )-> Any:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowercase_ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
lowercase_ = (BioGptForCausalLM,) if is_torch_available() else ()
lowercase_ = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ = False
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = BioGptModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 )
def __lowercase( self : Tuple )-> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowercase( self : Optional[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def __lowercase( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE__ : List[str] = type
self.model_tester.create_and_check_model(*a_ )
def __lowercase( self : int )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*a_ )
def __lowercase( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*a_ , gradient_checkpointing=a_ )
def __lowercase( self : Union[str, Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*a_ )
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*a_ )
def __lowercase( self : str )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*a_ )
@slow
def __lowercase( self : List[str] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(a_ )
SCREAMING_SNAKE_CASE__ : Dict = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
SCREAMING_SNAKE_CASE__ : List[str] = 'left'
# Define PAD Token = EOS Token = 50256
SCREAMING_SNAKE_CASE__ : Any = tokenizer.eos_token
SCREAMING_SNAKE_CASE__ : Tuple = model.config.eos_token_id
# use different length sentences to test batching
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
'Hello, my dog is a little',
'Today, I',
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(a_ , return_tensors='pt' , padding=a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = inputs['input_ids'].to(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = model.generate(
input_ids=a_ , attention_mask=inputs['attention_mask'].to(a_ ) , )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(a_ )
SCREAMING_SNAKE_CASE__ : Dict = model.generate(input_ids=a_ )
SCREAMING_SNAKE_CASE__ : Tuple = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
SCREAMING_SNAKE_CASE__ : Dict = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.generate(input_ids=a_ , max_length=model.config.max_length - num_paddings )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.decode(output_non_padded[0] , skip_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = [
'Hello, my dog is a little bit bigger than a little bit.',
'Today, I have a good idea of how to use the information',
]
self.assertListEqual(a_ , a_ )
self.assertListEqual(a_ , [non_padded_sentence, padded_sentence] )
@slow
def __lowercase( self : Any )-> List[Any]:
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : List[Any] = BioGptModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def __lowercase( self : Optional[int] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[Any] = 3
SCREAMING_SNAKE_CASE__ : List[Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_ids.ne(1 ).to(a_ )
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : int = BioGptForSequenceClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : str = 3
SCREAMING_SNAKE_CASE__ : Any = 'multi_label_classification'
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE__ : Any = input_ids.ne(1 ).to(a_ )
SCREAMING_SNAKE_CASE__ : str = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE__ : Dict = BioGptForSequenceClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = model(a_ , attention_mask=a_ , labels=a_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def __lowercase( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([[2, 4805, 9, 656, 21]] )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ )[0]
SCREAMING_SNAKE_CASE__ : List[str] = 4_2384
SCREAMING_SNAKE_CASE__ : Dict = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , a_ )
SCREAMING_SNAKE_CASE__ : int = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=1e-4 ) )
@slow
def __lowercase( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
SCREAMING_SNAKE_CASE__ : Dict = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(a_ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer('COVID-19 is' , return_tensors='pt' ).to(a_ )
SCREAMING_SNAKE_CASE__ : int = model.generate(
**a_ , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=a_ , )
SCREAMING_SNAKE_CASE__ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
self.assertEqual(a_ , a_ )
| 636 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ : Dict = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[str] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 636 | import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float tensor of the given shape, as nested Python lists."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
class snake_case ( unittest.TestCase ):
def __init__( self : List[Any] , a_ : Optional[Any] , a_ : Union[str, Any]=7 , a_ : Any=400 , a_ : List[Any]=2000 , a_ : Tuple=1 , a_ : Optional[int]=0.0 , a_ : Optional[Any]=1_6000 , a_ : str=True , a_ : Union[str, Any]=80 , a_ : Dict=16 , a_ : Tuple=64 , a_ : Any="hann_window" , a_ : Union[str, Any]=80 , a_ : List[Any]=7600 , a_ : Optional[Any]=1e-1_0 , a_ : Dict=True , )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = parent
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size
SCREAMING_SNAKE_CASE__ : str = min_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = max_seq_length
SCREAMING_SNAKE_CASE__ : str = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE__ : int = feature_size
SCREAMING_SNAKE_CASE__ : str = padding_value
SCREAMING_SNAKE_CASE__ : Any = sampling_rate
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : int = num_mel_bins
SCREAMING_SNAKE_CASE__ : int = hop_length
SCREAMING_SNAKE_CASE__ : str = win_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = win_function
SCREAMING_SNAKE_CASE__ : List[str] = fmin
SCREAMING_SNAKE_CASE__ : Dict = fmax
SCREAMING_SNAKE_CASE__ : int = mel_floor
SCREAMING_SNAKE_CASE__ : Tuple = return_attention_mask
def __lowercase( self : Dict )-> Dict:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def __lowercase( self : List[Any] , a_ : str=False , a_ : List[Any]=False )-> Optional[Any]:
"""simple docstring"""
def _flatten(a_ : int ):
return list(itertools.chain(*a_ ) )
if equal_length:
SCREAMING_SNAKE_CASE__ : Tuple = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE__ : Optional[int] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE__ : int = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
def __lowercase( self : Any , a_ : int=False , a_ : Any=False )-> Union[str, Any]:
"""simple docstring"""
if equal_length:
SCREAMING_SNAKE_CASE__ : str = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE__ : Tuple = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE__ : List[str] = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = SpeechTaFeatureExtractor
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = SpeechTaFeatureExtractionTester(self )
def __lowercase( self : Any , a_ : Optional[int] )-> List[str]:
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_ , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_ , axis=0 ) - 1 ) < 1e-3 ) )
def __lowercase( self : Tuple )-> Dict:
"""simple docstring"""
        # Tests that all calls wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE__ : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : Optional[int] = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE__ : List[Any] = feat_extract(a_ , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : List[str] = feat_extract(a_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : int = ['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE__ : Tuple = [None, 1600, None]
for max_length, padding in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE__ : str = feat_extract(a_ , padding=a_ , max_length=a_ , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __lowercase( self : List[Any] )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : List[Any] = range(800 , 1400 , 200 )
SCREAMING_SNAKE_CASE__ : int = [floats_list((1, x) )[0] for x in lengths]
SCREAMING_SNAKE_CASE__ : int = ['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [None, 1600, None]
for max_length, padding in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE__ : List[str] = feat_extract(a_ , max_length=a_ , padding=a_ )
SCREAMING_SNAKE_CASE__ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __lowercase( self : int )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract(
a_ , truncation=a_ , max_length=1000 , padding='max_length' , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __lowercase( self : Optional[Any] )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : List[str] = feat_extract(
a_ , truncation=a_ , max_length=1000 , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
SCREAMING_SNAKE_CASE__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : str = feat_extract(
a_ , truncation=a_ , max_length=2000 , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def __lowercase( self : Any )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.random.rand(100 ).astype(np.floataa )
SCREAMING_SNAKE_CASE__ : int = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE__ : Any = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
SCREAMING_SNAKE_CASE__ : Tuple = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __lowercase( self : Any )-> Optional[int]:
"""simple docstring"""
# Tests that all calls wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : Dict = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
SCREAMING_SNAKE_CASE__ : Optional[int] = feature_extractor(audio_target=a_ , padding=a_ , return_tensors='np' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : int = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE__ : Optional[Any] = feature_extractor(a_ , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : Optional[Any] = feature_extractor(a_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE__ : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE__ : List[str] = np.asarray(a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = feature_extractor(a_ , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : str = feature_extractor(a_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
def __lowercase( self : Dict )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Any = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(a_ ) == len(a_ ) for x, y in zip(a_ , processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE__ : str = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a_ )
SCREAMING_SNAKE_CASE__ : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
SCREAMING_SNAKE_CASE__ : List[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE__ : int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a_ )
SCREAMING_SNAKE_CASE__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Any = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE__ : Optional[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __lowercase( self : Tuple )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : Dict = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ : str = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : List[Any] = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.pad(a_ , padding='longest' , return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE__ : Any = feat_extract.pad(a_ , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def __lowercase( self : Any )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.feat_extract_dict
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feature_extraction_class(**a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ : Any = [len(a_ ) for x in speech_inputs]
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE__ : Any = feat_extract.pad(a_ , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , a_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , a_ )
def __lowercase( self : str )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.feat_extract_dict
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feature_extraction_class(**a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ : Tuple = [len(a_ ) for x in speech_inputs]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Dict = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ : str = min(a_ )
SCREAMING_SNAKE_CASE__ : Any = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE__ : int = feat_extract.pad(
a_ , padding='max_length' , max_length=a_ , truncation=a_ , return_tensors='np' )
self.assertIn('attention_mask' , a_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def __lowercase( self : Optional[int] , a_ : List[str] )-> Any:
"""simple docstring"""
from datasets import load_dataset
SCREAMING_SNAKE_CASE__ : int = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE__ : List[Any] = ds.sort('id' ).select(range(a_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def __lowercase( self : List[str] )-> List[Any]:
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor(
[2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
 3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
 2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
 4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
 7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
 4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03] )
# fmt: on
SCREAMING_SNAKE_CASE__ : List[str] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : List[str] = feature_extractor(a_ , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 9_3680) )
self.assertTrue(torch.allclose(input_values[0, :30] , a_ , atol=1e-6 ) )
def __lowercase( self : Tuple )-> List[Any]:
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE__ : int = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : str = feature_extractor(audio_target=a_ , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , a_ , atol=1e-4 ) )
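# The padding tests above repeatedly call _check_zero_mean_unit_variance on the
# extractor output. A minimal NumPy sketch of the normalisation being verified;
# the helper name and tolerances are illustrative, not taken from the test suite.
import numpy as np

def zero_mean_unit_var(waveform, eps=1e-7):
    # shift to zero mean, then scale to unit variance
    waveform = np.asarray(waveform, dtype=np.float32)
    return (waveform - waveform.mean()) / np.sqrt(waveform.var() + eps)

normed = zero_mean_unit_var(np.random.randn(800))
assert abs(float(normed.mean())) < 1e-3 and abs(float(normed.var()) - 1.0) < 1e-3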
| 636 | 1 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class snake_case :
@staticmethod
def __lowercase( *a_ : List[str] , **a_ : str )-> str:
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
lowercase_ = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def __lowercase( self : Dict , a_ : Dict , a_ : Optional[int] , a_ : Dict )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
SCREAMING_SNAKE_CASE__ : List[str] = [
{
'image': Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'question': 'How many cats are there?',
},
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'question': 'How many cats are there?',
},
]
return vqa_pipeline, examples
def __lowercase( self : List[Any] , a_ : Any , a_ : Tuple )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = vqa_pipeline(a_ , top_k=1 )
self.assertEqual(
a_ , [
[{'score': ANY(a_ ), 'answer': ANY(a_ )}],
[{'score': ANY(a_ ), 'answer': ANY(a_ )}],
] , )
@require_torch
def __lowercase( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
SCREAMING_SNAKE_CASE__ : Any = './tests/fixtures/tests_samples/COCO/000000039769.png'
SCREAMING_SNAKE_CASE__ : str = 'How many cats are there?'
SCREAMING_SNAKE_CASE__ : str = vqa_pipeline(image=a_ , question='How many cats are there?' , top_k=2 )
self.assertEqual(
a_ , [{'score': ANY(a_ ), 'answer': ANY(a_ )}, {'score': ANY(a_ ), 'answer': ANY(a_ )}] )
SCREAMING_SNAKE_CASE__ : List[str] = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
a_ , [{'score': ANY(a_ ), 'answer': ANY(a_ )}, {'score': ANY(a_ ), 'answer': ANY(a_ )}] )
@slow
@require_torch
def __lowercase( self : Union[str, Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = pipeline('visual-question-answering' , model='dandelin/vilt-b32-finetuned-vqa' )
SCREAMING_SNAKE_CASE__ : Optional[int] = './tests/fixtures/tests_samples/COCO/000000039769.png'
SCREAMING_SNAKE_CASE__ : Dict = 'How many cats are there?'
SCREAMING_SNAKE_CASE__ : Union[str, Any] = vqa_pipeline(image=a_ , question=a_ , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
SCREAMING_SNAKE_CASE__ : List[Any] = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
SCREAMING_SNAKE_CASE__ : int = vqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [[{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]] * 2 , )
@require_tf
@unittest.skip('Visual question answering not implemented in TF' )
def __lowercase( self : List[str] )-> Optional[int]:
"""simple docstring"""
pass
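# The slow test above pins down the expected pipeline behaviour. A hedged usage
# sketch mirroring it; the model id, image path and question are taken from the
# test, and the exact scores are not reproduced here.
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
preds = vqa(
    image="./tests/fixtures/tests_samples/COCO/000000039769.png",
    question="How many cats are there?",
    top_k=2,
)
print(preds)  # a list of {"score": ..., "answer": ...} dicts, as asserted above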
| 636 | import math
import sys
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = ''
try:
with open(lowercase__ , 'rb' ) as binary_file:
SCREAMING_SNAKE_CASE__ : Tuple = binary_file.read()
for dat in data:
SCREAMING_SNAKE_CASE__ : Tuple = f'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = {'0': '0', '1': '1'}
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = '', ''
SCREAMING_SNAKE_CASE__ : Tuple = len(lowercase__ )
for i in range(len(lowercase__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
SCREAMING_SNAKE_CASE__ : int = lexicon[curr_string]
result += last_match_id
SCREAMING_SNAKE_CASE__ : str = last_match_id + '0'
if math.loga(lowercase__ ).is_integer():
SCREAMING_SNAKE_CASE__ : List[str] = {}
for curr_key in list(lowercase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = lexicon.pop(lowercase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = new_lex
SCREAMING_SNAKE_CASE__ : Any = last_match_id + '1'
index += 1
SCREAMING_SNAKE_CASE__ : Tuple = ''
return result
def _a ( lowercase__ : str , lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = 8
try:
with open(lowercase__ , 'wb' ) as opened_file:
SCREAMING_SNAKE_CASE__ : Dict = [
to_write[i : i + byte_length]
for i in range(0 , len(lowercase__ ) , lowercase__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(lowercase__ , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
SCREAMING_SNAKE_CASE__ : Optional[int] = data_bits[counter:]
SCREAMING_SNAKE_CASE__ : int = data_bits[counter + 1 :]
return data_bits
def _a ( lowercase__ : str , lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = read_file_binary(lowercase__ )
SCREAMING_SNAKE_CASE__ : Dict = remove_prefix(lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = decompress_data(lowercase__ )
write_file_binary(lowercase__ , lowercase__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
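# The first helper above turns each byte of the input file into its 8-bit binary
# representation via the {dat:08b} format. A tiny illustrative check of that
# conversion (the byte values are made up):
data = bytes([0b10110000, 0b00000001])
bits = "".join(f"{byte:08b}" for byte in data)
assert bits == "1011000000000001"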
| 636 | 1 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
class snake_case ( UpperCamelCase_ ):
def __init__( self : List[Any] , *a_ : int , **a_ : Optional[int] )-> None:
"""simple docstring"""
warnings.warn(
'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use GLPNImageProcessor instead.' , a_ , )
super().__init__(*a_ , **a_ )
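# A hedged sketch of the migration the warning above recommends (default
# construction shown; any kwargs previously passed to GLPNFeatureExtractor are
# assumed to carry over unchanged):
from transformers import GLPNImageProcessor

image_processor = GLPNImageProcessor()  # use in place of the deprecated GLPNFeatureExtractor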
| 636 | def _a ( lowercase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : List[Any] = set({'(', '[', '{'} )
SCREAMING_SNAKE_CASE__ : Optional[int] = set({')', ']', '}'} )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'{': '}', '[': ']', '(': ')'}
for i in range(len(lowercase__ ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(lowercase__ ) == 0 or (len(lowercase__ ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(lowercase__ ) == 0
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = input('Enter sequence of brackets: ' )
if is_balanced(lowercase__ ):
print(lowercase__ , 'is balanced' )
else:
print(lowercase__ , 'is not balanced' )
if __name__ == "__main__":
main()
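# Because the checker and the CLI driver above share the same placeholder name,
# a self-contained sketch of the same stack-based check is given here under the
# illustrative name is_balanced:
def is_balanced(s: str) -> bool:
    # push opening brackets, pop and compare on closing brackets
    pairs = {"(": ")", "[": "]", "{": "}"}
    stack = []
    for ch in s:
        if ch in pairs:
            stack.append(ch)
        elif ch in ")]}":
            if not stack or pairs[stack.pop()] != ch:
                return False
    return not stack

assert is_balanced("([]{})")
assert not is_balanced("([)]")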
| 636 | 1 |
class snake_case :
def __init__( self : Dict )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = 0
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
def __lowercase( self : List[str] , a_ : List[str] )-> str:
"""simple docstring"""
if vertex not in self.adjacency:
SCREAMING_SNAKE_CASE__ : int = {}
self.num_vertices += 1
def __lowercase( self : int , a_ : int , a_ : Tuple , a_ : Dict )-> int:
"""simple docstring"""
self.add_vertex(a_ )
self.add_vertex(a_ )
if head == tail:
return
SCREAMING_SNAKE_CASE__ : Union[str, Any] = weight
SCREAMING_SNAKE_CASE__ : int = weight
def __lowercase( self : int )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.get_edges()
for edge in edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = edge
edges.remove((tail, head, weight) )
for i in range(len(a_ ) ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = list(edges[i] )
edges.sort(key=lambda a_ : e[2] )
for i in range(len(a_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
SCREAMING_SNAKE_CASE__ : str = edges[i][2] + 1
for edge in edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = edge
SCREAMING_SNAKE_CASE__ : Optional[Any] = weight
SCREAMING_SNAKE_CASE__ : List[Any] = weight
def __str__( self : List[str] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
SCREAMING_SNAKE_CASE__ : Tuple = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip('\n' )
def __lowercase( self : int )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __lowercase( self : int )-> Dict:
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
def __lowercase( a_ : Dict=None , a_ : Tuple=None )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = Graph()
if vertices is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = []
if edges is None:
SCREAMING_SNAKE_CASE__ : str = []
for vertex in vertices:
g.add_vertex(a_ )
for edge in edges:
g.add_edge(*a_ )
return g
class snake_case :
def __init__( self : Optional[int] )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {}
SCREAMING_SNAKE_CASE__ : str = {}
def __len__( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
return len(self.parent )
def __lowercase( self : str , a_ : int )-> Tuple:
"""simple docstring"""
if item in self.parent:
return self.find(a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = item
SCREAMING_SNAKE_CASE__ : Dict = 0
return item
def __lowercase( self : List[Any] , a_ : Any )-> Tuple:
"""simple docstring"""
if item not in self.parent:
return self.make_set(a_ )
if item != self.parent[item]:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.find(self.parent[item] )
return self.parent[item]
def __lowercase( self : List[Any] , a_ : int , a_ : Optional[int] )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.find(a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.find(a_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
SCREAMING_SNAKE_CASE__ : Any = roota
return roota
if self.rank[roota] < self.rank[roota]:
SCREAMING_SNAKE_CASE__ : int = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
SCREAMING_SNAKE_CASE__ : Dict = roota
return roota
return None
@staticmethod
def __lowercase( a_ : Optional[Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = graph.num_vertices
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Graph.UnionFind()
SCREAMING_SNAKE_CASE__ : Tuple = []
while num_components > 1:
SCREAMING_SNAKE_CASE__ : str = {}
for vertex in graph.get_vertices():
SCREAMING_SNAKE_CASE__ : Optional[int] = -1
SCREAMING_SNAKE_CASE__ : Optional[Any] = graph.get_edges()
for edge in edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = edge
edges.remove((tail, head, weight) )
for edge in edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = edge
SCREAMING_SNAKE_CASE__ : Dict = union_find.find(a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = union_find.find(a_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
SCREAMING_SNAKE_CASE__ : int = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = cheap_edge[vertex]
if union_find.find(a_ ) != union_find.find(a_ ):
union_find.union(a_ , a_ )
mst_edges.append(cheap_edge[vertex] )
SCREAMING_SNAKE_CASE__ : Tuple = num_components - 1
SCREAMING_SNAKE_CASE__ : List[Any] = Graph.build(edges=a_ )
return mst
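# A hedged usage sketch of the MST routine above (Boruvka's algorithm). Graph and
# Graph.build are referenced in the code itself; boruvka_mst is an illustrative
# stand-in for the final static method, whose original name is not preserved here.
g = Graph.build(
    vertices=["a", "b", "c", "d"],
    edges=[("a", "b", 1), ("b", "c", 2), ("c", "d", 1), ("a", "d", 4)],
)
mst = Graph.boruvka_mst(g)  # expected to return a spanning tree over the four vertices
print(mst)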
| 636 | import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ : List[Any] = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = PegasusTokenizer
lowercase_ = PegasusTokenizerFast
lowercase_ = True
lowercase_ = True
def __lowercase( self : int )-> List[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ : List[Any] = PegasusTokenizer(a_ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowercase( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def __lowercase( self : Any , **a_ : Optional[Any] )-> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **a_ )
def __lowercase( self : Union[str, Any] , a_ : List[Any] )-> Optional[int]:
"""simple docstring"""
return ("This is a test", "This is a test")
def __lowercase( self : Optional[int] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = '</s>'
SCREAMING_SNAKE_CASE__ : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def __lowercase( self : Dict )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
self.assertEqual(len(a_ ) , 1103 )
def __lowercase( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def __lowercase( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Tuple = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
SCREAMING_SNAKE_CASE__ : List[str] = rust_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = py_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0]
self.assertListEqual(a_ , a_ )
def __lowercase( self : Any )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
SCREAMING_SNAKE_CASE__ : Any = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
SCREAMING_SNAKE_CASE__ : List[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer([raw_input_str] , return_tensors=a_ ).input_ids[0]
self.assertListEqual(a_ , a_ )
def __lowercase( self : int )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
SCREAMING_SNAKE_CASE__ : int = 'To ensure a smooth flow of bank resolutions.'
SCREAMING_SNAKE_CASE__ : List[Any] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer([raw_input_str] , return_tensors=a_ ).input_ids[0]
self.assertListEqual(a_ , a_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def __lowercase( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = ['This is going to be way too long.' * 150, 'short example']
SCREAMING_SNAKE_CASE__ : int = ['not super long but more than 5 tokens', 'tiny']
SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer(a_ , padding=a_ , truncation=a_ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : Optional[int] = self._large_tokenizer(
text_target=a_ , max_length=5 , padding=a_ , truncation=a_ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(a_ ) == 2 # input_ids, attention_mask.
@slow
def __lowercase( self : Any )-> str:
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE__ : Optional[int] = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = PegasusTokenizer
lowercase_ = PegasusTokenizerFast
lowercase_ = True
lowercase_ = True
def __lowercase( self : Any )-> Union[str, Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ : Optional[int] = PegasusTokenizer(a_ , offset=0 , mask_token_sent=a_ , mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowercase( self : Optional[Any] )-> List[str]:
"""simple docstring"""
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def __lowercase( self : List[str] , **a_ : Optional[Any] )-> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **a_ )
def __lowercase( self : Optional[Any] , a_ : Tuple )-> str:
"""simple docstring"""
return ("This is a test", "This is a test")
def __lowercase( self : str )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Tuple = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
SCREAMING_SNAKE_CASE__ : str = rust_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0]
SCREAMING_SNAKE_CASE__ : str = py_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0]
self.assertListEqual(a_ , a_ )
@require_torch
def __lowercase( self : List[str] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = ['This is going to be way too long.' * 1000, 'short example']
SCREAMING_SNAKE_CASE__ : Optional[int] = ['not super long but more than 5 tokens', 'tiny']
SCREAMING_SNAKE_CASE__ : str = self._large_tokenizer(a_ , padding=a_ , truncation=a_ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer(
text_target=a_ , max_length=5 , padding=a_ , truncation=a_ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(a_ ) == 2 # input_ids, attention_mask.
def __lowercase( self : Dict )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._large_tokenizer(a_ ).input_ids
self.assertListEqual(
a_ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
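# A hedged sketch of the seq2seq tokenization pattern the batch tests above rely
# on; the checkpoint name comes from the large-tokenizer fixture, and the texts
# are illustrative.
from transformers import PegasusTokenizer

tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-large")
src = ["This is going to be way too long." * 150, "short example"]
tgt = ["not super long but more than 5 tokens", "tiny"]
batch = tokenizer(src, padding=True, truncation=True, return_tensors="pt")
targets = tokenizer(text_target=tgt, max_length=5, padding=True, truncation=True, return_tensors="pt")
# batch.input_ids is truncated/padded to the model max length (1024 for pegasus-large);
# targets["input_ids"] has shape (2, 5), matching the assertions above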
| 636 | 1 |
from collections import deque
class snake_case :
def __init__( self : Union[str, Any] , a_ : str , a_ : int , a_ : int )-> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = process_name # process name
SCREAMING_SNAKE_CASE__ : Any = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
SCREAMING_SNAKE_CASE__ : Optional[int] = arrival_time
SCREAMING_SNAKE_CASE__ : List[Any] = burst_time # remaining burst time
SCREAMING_SNAKE_CASE__ : Any = 0 # total time of the process wait in ready queue
SCREAMING_SNAKE_CASE__ : List[Any] = 0 # time from arrival time to completion time
class snake_case :
def __init__( self : List[str] , a_ : int , a_ : list[int] , a_ : deque[Process] , a_ : int , )-> None:
"""simple docstring"""
# total number of mlfq's queues
SCREAMING_SNAKE_CASE__ : Dict = number_of_queues
# time slice of queues that round robin algorithm applied
SCREAMING_SNAKE_CASE__ : Tuple = time_slices
# unfinished processes are kept in this ready_queue
SCREAMING_SNAKE_CASE__ : List[str] = queue
# current time
SCREAMING_SNAKE_CASE__ : Dict = current_time
# finished processes are kept in this sequence queue
SCREAMING_SNAKE_CASE__ : deque[Process] = deque()
def __lowercase( self : Tuple )-> list[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def __lowercase( self : Tuple , a_ : list[Process] )-> list[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = []
for i in range(len(a_ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def __lowercase( self : Any , a_ : list[Process] )-> list[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = []
for i in range(len(a_ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def __lowercase( self : Any , a_ : list[Process] )-> list[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for i in range(len(a_ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def __lowercase( self : int , a_ : deque[Process] )-> list[int]:
"""simple docstring"""
return [q.burst_time for q in queue]
def __lowercase( self : Optional[int] , a_ : Process )-> int:
"""simple docstring"""
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def __lowercase( self : List[Any] , a_ : deque[Process] )-> deque[Process]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : deque[Process] = deque() # sequence deque of finished process
while len(a_ ) != 0:
SCREAMING_SNAKE_CASE__ : Any = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(a_ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
SCREAMING_SNAKE_CASE__ : Tuple = 0
# set the process's turnaround time because it is finished
SCREAMING_SNAKE_CASE__ : str = self.current_time - cp.arrival_time
# set the completion time
SCREAMING_SNAKE_CASE__ : int = self.current_time
# add the process to queue that has finished queue
finished.append(a_ )
self.finish_queue.extend(a_ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def __lowercase( self : Union[str, Any] , a_ : deque[Process] , a_ : int )-> tuple[deque[Process], deque[Process]]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(a_ ) ):
SCREAMING_SNAKE_CASE__ : Tuple = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(a_ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.current_time
# put the process at the back of the queue because it is not finished
ready_queue.append(a_ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
SCREAMING_SNAKE_CASE__ : List[Any] = 0
# set the finish time
SCREAMING_SNAKE_CASE__ : int = self.current_time
# update the process' turnaround time because it is finished
SCREAMING_SNAKE_CASE__ : List[str] = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(a_ )
self.finish_queue.extend(a_ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def __lowercase( self : int )-> deque[Process]:
"""simple docstring"""
# all queues except the last one use the round_robin algorithm
for i in range(self.number_of_queues - 1 ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue uses the first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
SCREAMING_SNAKE_CASE__ : List[str] = Process("P1", 0, 53)
SCREAMING_SNAKE_CASE__ : List[str] = Process("P2", 0, 17)
SCREAMING_SNAKE_CASE__ : Any = Process("P3", 0, 68)
SCREAMING_SNAKE_CASE__ : Optional[int] = Process("P4", 0, 24)
SCREAMING_SNAKE_CASE__ : Optional[Any] = 3
SCREAMING_SNAKE_CASE__ : int = [17, 25]
SCREAMING_SNAKE_CASE__ : str = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Process("P1", 0, 53)
SCREAMING_SNAKE_CASE__ : List[Any] = Process("P2", 0, 17)
SCREAMING_SNAKE_CASE__ : Optional[Any] = Process("P3", 0, 68)
SCREAMING_SNAKE_CASE__ : int = Process("P4", 0, 24)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 3
SCREAMING_SNAKE_CASE__ : List[Any] = [17, 25]
SCREAMING_SNAKE_CASE__ : int = deque([Pa, Pa, Pa, Pa])
SCREAMING_SNAKE_CASE__ : List[str] = MLFQ(number_of_queues, time_slices, queue, 0)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
F"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
F"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
| 636 | def _a ( lowercase__ : int = 1_00_00_00 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , lowercase__ ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
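# The function above is a totient sieve: whenever phi[i] is still i - 1, i is
# prime and phi is reduced over the multiples of i. A standalone sketch with a
# worked check; the name totient_sum is illustrative.
def totient_sum(limit: int) -> int:
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):  # walk the multiples of i
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])

# phi(2..8) = 1, 2, 2, 4, 2, 6, 4, so the answer for limit = 8 is 21
assert totient_sum(8) == 21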
| 636 | 1 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
SCREAMING_SNAKE_CASE__ : int = True
from torch.cuda.amp import autocast
SCREAMING_SNAKE_CASE__ : List[Any] = logging.getLogger(__name__)
@dataclass
class snake_case :
lowercase_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'Whether to log verbose messages or not.'} , )
lowercase_ = field(
default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'} )
lowercase_ = field(
default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'} )
lowercase_ = field(
default=0.999995 , metadata={'help': 'Decay of gumbel temperature during training.'} )
def _a ( lowercase__ : ModelArguments , lowercase__ : TrainingArguments ):
'''simple docstring'''
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
SCREAMING_SNAKE_CASE__ : str = logging.WARNING
if model_args.verbose_logging:
SCREAMING_SNAKE_CASE__ : Dict = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
SCREAMING_SNAKE_CASE__ : List[str] = logging.INFO
logger.setLevel(lowercase__ )
@dataclass
class snake_case :
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowercase_ = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
lowercase_ = field(
default='validation' , metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
lowercase_ = field(
default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
lowercase_ = field(
default=1 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
lowercase_ = field(
default=20.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'} )
@dataclass
class snake_case :
lowercase_ = 42
lowercase_ = 42
lowercase_ = "longest"
lowercase_ = None
lowercase_ = None
def __call__( self : int , a_ : List[Dict[str, Union[List[int], torch.Tensor]]] )-> Dict[str, torch.Tensor]:
"""simple docstring"""
# reformat list to dict and set to pytorch format
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.feature_extractor.pad(
a_ , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
SCREAMING_SNAKE_CASE__ : List[Any] = self.model._get_feat_extract_output_lengths(batch['input_values'].shape[-1] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = batch['input_values'].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
SCREAMING_SNAKE_CASE__ : Tuple = self.model._get_feat_extract_output_lengths(batch['attention_mask'].sum(-1 ) ).to(
torch.long )
SCREAMING_SNAKE_CASE__ : str = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['input_values'].device )
# these two operations make sure that all values
# before the output-length indices are attended to
SCREAMING_SNAKE_CASE__ : List[Any] = 1
SCREAMING_SNAKE_CASE__ : Optional[int] = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
SCREAMING_SNAKE_CASE__ : str = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=a_ , min_masks=2 , )
return batch
class snake_case ( UpperCamelCase_ ):
def __init__( self : str , *a_ : List[str] , a_ : Dict=1 , a_ : Union[str, Any]=0 , a_ : Union[str, Any]=1.0 , **a_ : List[str] )-> Any:
"""simple docstring"""
super().__init__(*a_ , **a_ )
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_gumbel_temp
SCREAMING_SNAKE_CASE__ : int = min_gumbel_temp
SCREAMING_SNAKE_CASE__ : int = gumbel_temp_decay
def __lowercase( self : int , a_ : nn.Module , a_ : Dict[str, Union[torch.Tensor, Any]] )-> torch.Tensor:
"""simple docstring"""
model.train()
SCREAMING_SNAKE_CASE__ : Dict = self._prepare_inputs(a_ )
if self.use_amp:
with autocast():
SCREAMING_SNAKE_CASE__ : List[str] = self.compute_loss(a_ , a_ )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.compute_loss(a_ , a_ )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
SCREAMING_SNAKE_CASE__ : str = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
SCREAMING_SNAKE_CASE__ : List[Any] = loss.sum() / (inputs['mask_time_indices']).sum()
else:
raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
if self.args.gradient_accumulation_steps > 1:
SCREAMING_SNAKE_CASE__ : str = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(a_ ).backward()
elif self.use_apex:
with amp.scale_loss(a_ , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(a_ )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args_into_dataclasses()
configure_logger(lowercase__ , lowercase__ )
# Downloading and loading a dataset from the hub.
SCREAMING_SNAKE_CASE__ : Optional[int] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DatasetDict()
SCREAMING_SNAKE_CASE__ : Optional[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
SCREAMING_SNAKE_CASE__ : Optional[Any] = DatasetDict()
SCREAMING_SNAKE_CASE__ : Optional[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split='validation' , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE__ : Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=lowercase__ )
def prepare_dataset(lowercase__ : List[Any] ):
# check that all files have the correct sampling rate
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
SCREAMING_SNAKE_CASE__ : List[str] = datasets.map(
lowercase__ , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['train'].column_names )
# filter audio files that are too long
SCREAMING_SNAKE_CASE__ : Dict = vectorized_datasets.filter(
lambda lowercase__ : len(data['speech'] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(lowercase__ : List[Any] ):
return feature_extractor(batch['speech'] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
SCREAMING_SNAKE_CASE__ : Optional[Any] = vectorized_datasets.map(
lowercase__ , batched=lowercase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['train'].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
'PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'
' ``config.feat_extract_norm=\'layer\'' )
SCREAMING_SNAKE_CASE__ : int = WavaVecaForPreTraining(lowercase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DataCollatorForWavaVecaPretraining(model=lowercase__ , feature_extractor=lowercase__ )
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaPreTrainer(
model=lowercase__ , data_collator=lowercase__ , args=lowercase__ , train_dataset=vectorized_datasets['train'] , eval_dataset=vectorized_datasets['validation'] , tokenizer=lowercase__ , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
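# HfArgumentParser exposes every field of the three dataclasses above as a CLI
# flag. A hedged invocation sketch; the script filename and checkpoint are
# placeholders, and the checkpoint must use do_stable_layer_norm=True with
# feat_extract_norm="layer", as enforced in main():
#
#   python run_wav2vec2_pretraining.py \
#       --model_name_or_path <stable-layer-norm wav2vec2 checkpoint> \
#       --dataset_name librispeech_asr \
#       --dataset_config_name clean \
#       --train_split_name train.100 \
#       --max_duration_in_seconds 20.0 \
#       --preprocessing_num_workers 4 \
#       --output_dir ./wav2vec2-pretrained \
#       --do_train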
| 636 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
def _a ( lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any]=False , lowercase__ : str=False , lowercase__ : Dict=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def _a ( lowercase__ : List[str] , lowercase__ : Dict ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
SCREAMING_SNAKE_CASE__ : Dict = 'vilt.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.weight''' )
SCREAMING_SNAKE_CASE__ : Any = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE__ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE__ : Tuple = in_proj_bias[-config.hidden_size :]
def _a ( lowercase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
def _a ( lowercase__ : int , lowercase__ : int , lowercase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = dct.pop(lowercase__ )
SCREAMING_SNAKE_CASE__ : Any = val
@torch.no_grad()
def _a ( lowercase__ : Dict , lowercase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = ViltConfig(image_size=3_84 , patch_size=32 , tie_word_embeddings=lowercase__ )
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : str = False
if "vqa" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Any = True
SCREAMING_SNAKE_CASE__ : str = 31_29
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'huggingface/label-files'
SCREAMING_SNAKE_CASE__ : int = 'vqa2-id2label.json'
SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {int(lowercase__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Dict = idalabel
SCREAMING_SNAKE_CASE__ : str = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : List[str] = ViltForQuestionAnswering(lowercase__ )
elif "nlvr" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Optional[int] = True
SCREAMING_SNAKE_CASE__ : List[str] = 2
SCREAMING_SNAKE_CASE__ : Dict = {0: 'False', 1: 'True'}
SCREAMING_SNAKE_CASE__ : Dict = {v: k for k, v in config.idalabel.items()}
SCREAMING_SNAKE_CASE__ : Tuple = 3
SCREAMING_SNAKE_CASE__ : int = ViltForImagesAndTextClassification(lowercase__ )
elif "irtr" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : str = ViltForImageAndTextRetrieval(lowercase__ )
elif "mlm_itm" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : int = True
SCREAMING_SNAKE_CASE__ : Optional[int] = ViltForMaskedLM(lowercase__ )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE__ : Any = torch.hub.load_state_dict_from_url(lowercase__ , map_location='cpu' )['state_dict']
SCREAMING_SNAKE_CASE__ : Any = create_rename_keys(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
read_in_q_k_v(lowercase__ , lowercase__ )
if mlm_model or irtr_model:
SCREAMING_SNAKE_CASE__ : Any = ['itm_score.fc.weight', 'itm_score.fc.bias']
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = model.load_state_dict(lowercase__ , strict=lowercase__ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(lowercase__ )
# Define processor
SCREAMING_SNAKE_CASE__ : str = ViltImageProcessor(size=3_84 )
SCREAMING_SNAKE_CASE__ : List[Any] = BertTokenizer.from_pretrained('bert-base-uncased' )
SCREAMING_SNAKE_CASE__ : List[Any] = ViltProcessor(lowercase__ , lowercase__ )
# Forward pass on example inputs (image + text)
if nlvr_model:
SCREAMING_SNAKE_CASE__ : List[str] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=lowercase__ ).raw )
SCREAMING_SNAKE_CASE__ : Any = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=lowercase__ ).raw )
SCREAMING_SNAKE_CASE__ : Tuple = (
'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
' standing.'
)
SCREAMING_SNAKE_CASE__ : List[Any] = processor(lowercase__ , lowercase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : List[str] = processor(lowercase__ , lowercase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : List[Any] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
SCREAMING_SNAKE_CASE__ : Tuple = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=lowercase__ ).raw )
if mlm_model:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'a bunch of [MASK] laying on a [MASK].'
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'How many cats are there?'
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor(lowercase__ , lowercase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : str = model(**lowercase__ )
# Verify outputs
if mlm_model:
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Size([1, 11, 3_05_22] )
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , lowercase__ , atol=1E-4 )
# verify masked token prediction equals "cats"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
SCREAMING_SNAKE_CASE__ : str = torch.Size([1, 31_29] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([-15.9495, -18.1472, -10.3041] )
assert torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 )
assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.Size([1, 2] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([-2.8721, 2.1291] )
assert torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase__ )
processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
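# A hedged follow-up sketch: once the conversion above has written a local folder (the path
# "./vilt-converted" is made up for illustration), the converted MLM checkpoint can be reloaded
# through the regular Transformers API. The function is defined only, not executed here.
def _reload_converted_vilt_sketch(folder: str = "./vilt-converted"):
    from transformers import ViltForMaskedLM, ViltProcessor

    processor = ViltProcessor.from_pretrained(folder)
    model = ViltForMaskedLM.from_pretrained(folder)
    return processor, model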
| 636 | 1 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
SCREAMING_SNAKE_CASE__ : Optional[Any] = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def _a ( lowercase__ : Optional[Any] , lowercase__ : Tuple , lowercase__ : List[str]=None ):
'''simple docstring'''
if rng is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = random.Random()
SCREAMING_SNAKE_CASE__ : Optional[Any] = 1
for dim in shape:
total_dims *= dim
SCREAMING_SNAKE_CASE__ : Tuple = []
for _ in range(lowercase__ ):
values.append(rng.randint(0 , vocab_size - 1 ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.array(lowercase__ , dtype=jnp.intaa ).reshape(lowercase__ )
return output
def _a ( lowercase__ : Optional[Any] , lowercase__ : int=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = ids_tensor(lowercase__ , vocab_size=2 , rng=lowercase__ )
# make sure that at least one token is attended to for each batch
SCREAMING_SNAKE_CASE__ : List[Any] = 1
return attn_mask
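# A self-contained numpy sketch of what the two helpers above produce: random token ids of a
# given shape plus a 0/1 attention mask whose last position is forced to 1 so every row attends
# to at least one token. The shapes and vocabulary size below are illustrative.
import numpy as np

_rng = np.random.default_rng(0)
_batch_size, _seq_len, _vocab_size = 2, 7, 99
_input_ids = _rng.integers(0, _vocab_size, size=(_batch_size, _seq_len), dtype=np.int32)
_attention_mask = _rng.integers(0, 2, size=(_batch_size, _seq_len), dtype=np.int32)
_attention_mask[:, -1] = 1  # guarantee at least one attended token per row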
@require_flax
class snake_case :
lowercase_ = None
lowercase_ = ()
def __lowercase( self : str )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 2
SCREAMING_SNAKE_CASE__ : str = 2
SCREAMING_SNAKE_CASE__ : int = inputs['input_ids'].shape[-1] // 2
SCREAMING_SNAKE_CASE__ : Any = inputs['input_ids'][:max_batch_size, :sequence_length]
SCREAMING_SNAKE_CASE__ : Optional[Any] = jnp.ones_like(a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
SCREAMING_SNAKE_CASE__ : Tuple = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def __lowercase( self : List[str] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ : int = False
SCREAMING_SNAKE_CASE__ : str = max_length
SCREAMING_SNAKE_CASE__ : Dict = 0
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ : Any = model_class(a_ )
SCREAMING_SNAKE_CASE__ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE__ : Optional[Any] = getattr(a_ , a_ )
SCREAMING_SNAKE_CASE__ : List[str] = pt_model_class(a_ ).eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = load_flax_weights_in_pytorch_model(a_ , flax_model.params )
SCREAMING_SNAKE_CASE__ : Dict = flax_model.generate(a_ ).sequences
SCREAMING_SNAKE_CASE__ : str = pt_model.generate(torch.tensor(a_ , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
SCREAMING_SNAKE_CASE__ : List[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def __lowercase( self : Optional[Any] )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : str = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class(a_ )
SCREAMING_SNAKE_CASE__ : str = model.generate(a_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , a_ )
SCREAMING_SNAKE_CASE__ : List[str] = jit(model.generate )
SCREAMING_SNAKE_CASE__ : Dict = jit_generate(a_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __lowercase( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ : List[str] = True
SCREAMING_SNAKE_CASE__ : str = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ : List[str] = model_class(a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.generate(a_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , a_ )
SCREAMING_SNAKE_CASE__ : Any = jit(model.generate )
SCREAMING_SNAKE_CASE__ : str = jit_generate(a_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __lowercase( self : int )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ : int = False
SCREAMING_SNAKE_CASE__ : str = max_length
SCREAMING_SNAKE_CASE__ : Any = 2
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ : Dict = model_class(a_ )
SCREAMING_SNAKE_CASE__ : str = model.generate(a_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = jit(model.generate )
SCREAMING_SNAKE_CASE__ : Tuple = jit_generate(a_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __lowercase( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_length
SCREAMING_SNAKE_CASE__ : Tuple = 2
SCREAMING_SNAKE_CASE__ : Tuple = 2
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] = model_class(a_ )
SCREAMING_SNAKE_CASE__ : Dict = model.generate(a_ ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def __lowercase( self : Any )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ : str = True
SCREAMING_SNAKE_CASE__ : int = max_length
SCREAMING_SNAKE_CASE__ : List[str] = 0.8
SCREAMING_SNAKE_CASE__ : Tuple = 10
SCREAMING_SNAKE_CASE__ : str = 0.3
SCREAMING_SNAKE_CASE__ : Optional[Any] = 1
SCREAMING_SNAKE_CASE__ : Any = 8
SCREAMING_SNAKE_CASE__ : Dict = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class(a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.generate(a_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , a_ )
SCREAMING_SNAKE_CASE__ : Any = jit(model.generate )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jit_generate(a_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __lowercase( self : Dict )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_length
SCREAMING_SNAKE_CASE__ : Tuple = 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = 8
SCREAMING_SNAKE_CASE__ : str = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ : Dict = model_class(a_ )
SCREAMING_SNAKE_CASE__ : Dict = model.generate(a_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jit(model.generate )
SCREAMING_SNAKE_CASE__ : Optional[Any] = jit_generate(a_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __lowercase( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ : List[Any] = max_length
SCREAMING_SNAKE_CASE__ : Tuple = 2
SCREAMING_SNAKE_CASE__ : List[Any] = 1
SCREAMING_SNAKE_CASE__ : Any = 8
SCREAMING_SNAKE_CASE__ : Optional[int] = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] = model_class(a_ )
SCREAMING_SNAKE_CASE__ : int = model.generate(a_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = jit(model.generate )
SCREAMING_SNAKE_CASE__ : int = jit_generate(a_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __lowercase( self : str )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE__ : List[str] = attention_mask.at[(0, 0)].set(0 )
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : Optional[int] = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ : str = model_class(a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.generate(a_ , attention_mask=a_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = jit(model.generate )
SCREAMING_SNAKE_CASE__ : Tuple = jit_generate(a_ , attention_mask=a_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __lowercase( self : List[str] )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE__ : List[Any] = attention_mask.at[(0, 0)].set(0 )
SCREAMING_SNAKE_CASE__ : List[str] = True
SCREAMING_SNAKE_CASE__ : Dict = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ : Dict = model_class(a_ )
SCREAMING_SNAKE_CASE__ : int = model.generate(a_ , attention_mask=a_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = jit(model.generate )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jit_generate(a_ , attention_mask=a_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __lowercase( self : Any )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE__ : List[str] = attention_mask.at[(0, 0)].set(0 )
SCREAMING_SNAKE_CASE__ : Any = 2
SCREAMING_SNAKE_CASE__ : List[Any] = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ : Tuple = model_class(a_ )
SCREAMING_SNAKE_CASE__ : Any = model.generate(a_ , attention_mask=a_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = jit(model.generate )
SCREAMING_SNAKE_CASE__ : Dict = jit_generate(a_ , attention_mask=a_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
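# A hedged sketch of the eager-vs-jit comparison the tests above repeat: generate once normally
# and once under jax.jit, then require identical token sequences. `model` stands for any Flax
# generative model and `input_ids` for a jnp integer array; neither is constructed here.
def _check_jit_matches_eager_sketch(model, input_ids):
    from jax import jit

    eager_sequences = model.generate(input_ids).sequences
    jit_generate = jit(model.generate)  # XLA-compiled version of the same call
    jit_sequences = jit_generate(input_ids).sequences
    assert eager_sequences.tolist() == jit_sequences.tolist()
    return eager_sequences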
@require_flax
class snake_case ( unittest.TestCase ):
def __lowercase( self : Optional[Any] )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'Hello world'
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(a_ , return_tensors='np' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(a_ , 'do_samples' ):
model.generate(a_ , do_samples=a_ )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(a_ , 'foo' ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'foo': 'bar'}
model.generate(a_ , **a_ )
| 636 | from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class snake_case :
lowercase_ = 42 # [batch_size x 3]
lowercase_ = 42 # [batch_size x 3]
lowercase_ = 42 # [batch_size x 3]
lowercase_ = 42 # [batch_size x 3]
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
def __lowercase( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def __lowercase( self : Dict )-> Tuple:
"""simple docstring"""
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def __lowercase( self : Dict )-> Union[str, Any]:
"""simple docstring"""
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def __lowercase( self : Tuple )-> torch.Tensor:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = torch.arange(self.height * self.width )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.stack(
[
pixel_indices % self.width,
torch.div(a_ , self.width , rounding_mode='trunc' ),
] , axis=1 , )
return coords
@property
def __lowercase( self : Any )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.shape
SCREAMING_SNAKE_CASE__ : Tuple = int(np.prod(a_ ) )
SCREAMING_SNAKE_CASE__ : List[str] = self.get_image_coords()
SCREAMING_SNAKE_CASE__ : Dict = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
SCREAMING_SNAKE_CASE__ : Any = self.get_camera_rays(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = rays.view(a_ , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def __lowercase( self : Optional[Any] , a_ : torch.Tensor )-> torch.Tensor:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
SCREAMING_SNAKE_CASE__ : str = coords.view(a_ , -1 , 2 )
SCREAMING_SNAKE_CASE__ : List[Any] = self.resolution()
SCREAMING_SNAKE_CASE__ : str = self.fov()
SCREAMING_SNAKE_CASE__ : Any = (flat.float() / (res - 1)) * 2 - 1
SCREAMING_SNAKE_CASE__ : Any = fracs * torch.tan(fov / 2 )
SCREAMING_SNAKE_CASE__ : List[str] = fracs.view(a_ , -1 , 2 )
SCREAMING_SNAKE_CASE__ : str = (
self.z.view(a_ , 1 , 3 )
+ self.x.view(a_ , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(a_ , 1 , 3 ) * fracs[:, :, 1:]
)
SCREAMING_SNAKE_CASE__ : Tuple = directions / directions.norm(dim=-1 , keepdim=a_ )
SCREAMING_SNAKE_CASE__ : Any = torch.stack(
[
torch.broadcast_to(self.origin.view(a_ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(a_ , *a_ , 2 , 3 )
def __lowercase( self : Optional[int] , a_ : int , a_ : int )-> "DifferentiableProjectiveCamera":
"""simple docstring"""
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=a_ , height=a_ , x_fov=self.x_fov , y_fov=self.y_fov , )
def _a ( lowercase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : str = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([np.sin(lowercase__ ), np.cos(lowercase__ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
SCREAMING_SNAKE_CASE__ : Tuple = -z * 4
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([np.cos(lowercase__ ), -np.sin(lowercase__ ), 0.0] )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.cross(lowercase__ , lowercase__ )
origins.append(lowercase__ )
xs.append(lowercase__ )
ys.append(lowercase__ )
zs.append(lowercase__ )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(lowercase__ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowercase__ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowercase__ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowercase__ , axis=0 ) ).float() , width=lowercase__ , height=lowercase__ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowercase__ )) , )
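# A minimal numpy sketch of the ray construction in `get_camera_rays` above: pixel coordinates
# are normalised to [-1, 1], scaled by tan(fov / 2), and combined with the camera's x/y/z axes
# to give one unit direction per pixel. The resolution, fov and axes below are illustrative.
import numpy as np

_width, _height, _fov = 4, 3, 0.7
_x_axis = np.array([1.0, 0.0, 0.0])
_y_axis = np.array([0.0, 1.0, 0.0])
_z_axis = np.array([0.0, 0.0, 1.0])

_xs, _ys = np.meshgrid(np.arange(_width), np.arange(_height))
_coords = np.stack([_xs.ravel(), _ys.ravel()], axis=1).astype(np.float64)
_fracs = (_coords / (np.array([_width, _height]) - 1)) * 2 - 1
_fracs = _fracs * np.tan(_fov / 2)
_directions = _z_axis + _fracs[:, :1] * _x_axis + _fracs[:, 1:] * _y_axis
_directions = _directions / np.linalg.norm(_directions, axis=-1, keepdims=True)
assert _directions.shape == (_width * _height, 3)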
| 636 | 1 |
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
SCREAMING_SNAKE_CASE__ : Optional[int] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")\n >>> pipe.to(\"cuda\")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save(\"cat.png\")\n ```\n"
def _a ( lowercase__ : Dict , lowercase__ : Union[str, Any] , lowercase__ : Tuple=8 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
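# A tiny worked example of the rounding helper above: with the default scale factor of 8, a
# requested side of 768 maps to ceil(768 / 64) * 8 = 96 and 700 maps to ceil(700 / 64) * 8 = 88.
# The function below is just a local restatement for the check, not part of the pipeline itself.
def _round_to_latent_sketch(side: int, scale_factor: int = 8) -> int:
    new_side = side // scale_factor**2
    if side % scale_factor**2 != 0:
        new_side += 1
    return new_side * scale_factor

assert _round_to_latent_sketch(768) == 96
assert _round_to_latent_sketch(700) == 88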
class snake_case ( UpperCamelCase_ ):
def __init__( self : Optional[Any] , a_ : MultilingualCLIP , a_ : XLMRobertaTokenizer , a_ : UNetaDConditionModel , a_ : Union[DDIMScheduler, DDPMScheduler] , a_ : VQModel , )-> Dict:
"""simple docstring"""
super().__init__()
self.register_modules(
text_encoder=a_ , tokenizer=a_ , unet=a_ , scheduler=a_ , movq=a_ , )
SCREAMING_SNAKE_CASE__ : Optional[int] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __lowercase( self : Union[str, Any] , a_ : int , a_ : Any , a_ : int , a_ : Union[str, Any] , a_ : Optional[int] , a_ : Union[str, Any] )-> Tuple:
"""simple docstring"""
if latents is None:
SCREAMING_SNAKE_CASE__ : Tuple = randn_tensor(a_ , generator=a_ , device=a_ , dtype=a_ )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
SCREAMING_SNAKE_CASE__ : List[Any] = latents.to(a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = latents * scheduler.init_noise_sigma
return latents
def __lowercase( self : List[Any] , a_ : List[Any] , a_ : Tuple , a_ : List[str] , a_ : Dict , a_ : str=None , )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = len(a_ ) if isinstance(a_ , a_ ) else 1
# get prompt text embeddings
SCREAMING_SNAKE_CASE__ : List[str] = self.tokenizer(
a_ , padding='max_length' , truncation=a_ , max_length=77 , return_attention_mask=a_ , add_special_tokens=a_ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE__ : Optional[int] = text_inputs.input_ids
SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer(a_ , padding='longest' , return_tensors='pt' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(a_ , a_ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = text_input_ids.to(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = text_inputs.attention_mask.to(a_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.text_encoder(
input_ids=a_ , attention_mask=a_ )
SCREAMING_SNAKE_CASE__ : List[str] = prompt_embeds.repeat_interleave(a_ , dim=0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = text_encoder_hidden_states.repeat_interleave(a_ , dim=0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = text_mask.repeat_interleave(a_ , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__ : List[str]
if negative_prompt is None:
SCREAMING_SNAKE_CASE__ : Dict = [''] * batch_size
elif type(a_ ) is not type(a_ ):
raise TypeError(
F'''`negative_prompt` should be the same type to `prompt`, but got {type(a_ )} !='''
F''' {type(a_ )}.''' )
elif isinstance(a_ , a_ ):
SCREAMING_SNAKE_CASE__ : Any = [negative_prompt]
elif batch_size != len(a_ ):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(a_ )}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
' the batch size of `prompt`.' )
else:
SCREAMING_SNAKE_CASE__ : Any = negative_prompt
SCREAMING_SNAKE_CASE__ : str = self.tokenizer(
a_ , padding='max_length' , max_length=77 , truncation=a_ , return_attention_mask=a_ , add_special_tokens=a_ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE__ : Tuple = uncond_input.input_ids.to(a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = uncond_input.attention_mask.to(a_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = self.text_encoder(
input_ids=a_ , attention_mask=a_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE__ : List[str] = negative_prompt_embeds.shape[1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = negative_prompt_embeds.repeat(1 , a_ )
SCREAMING_SNAKE_CASE__ : Dict = negative_prompt_embeds.view(batch_size * num_images_per_prompt , a_ )
SCREAMING_SNAKE_CASE__ : Dict = uncond_text_encoder_hidden_states.shape[1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = uncond_text_encoder_hidden_states.repeat(1 , a_ , 1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , a_ , -1 )
SCREAMING_SNAKE_CASE__ : int = uncond_text_mask.repeat_interleave(a_ , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cat([negative_prompt_embeds, prompt_embeds] )
SCREAMING_SNAKE_CASE__ : int = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def __lowercase( self : Union[str, Any] , a_ : Tuple=0 )-> Optional[Any]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.device(F'''cuda:{gpu_id}''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a_ , a_ )
def __lowercase( self : Any , a_ : Union[str, Any]=0 )-> Union[str, Any]:
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=a_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE__ : Any = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = cpu_offload_with_hook(a_ , a_ , prev_module_hook=a_ )
if self.safety_checker is not None:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = cpu_offload_with_hook(self.safety_checker , a_ , prev_module_hook=a_ )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE__ : Optional[int] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowercase( self : Optional[int] )-> Dict:
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(a_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a_ )
def __call__( self : List[Any] , a_ : Union[str, List[str]] , a_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , a_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , a_ : Optional[Union[str, List[str]]] = None , a_ : int = 512 , a_ : int = 512 , a_ : int = 100 , a_ : float = 4.0 , a_ : int = 1 , a_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a_ : Optional[torch.FloatTensor] = None , a_ : Optional[str] = "pil" , a_ : bool = True , )-> List[Any]:
"""simple docstring"""
if isinstance(a_ , a_ ):
SCREAMING_SNAKE_CASE__ : Tuple = 1
elif isinstance(a_ , a_ ):
SCREAMING_SNAKE_CASE__ : List[str] = len(a_ )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(a_ )}''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._execution_device
SCREAMING_SNAKE_CASE__ : str = batch_size * num_images_per_prompt
SCREAMING_SNAKE_CASE__ : Optional[Any] = guidance_scale > 1.0
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self._encode_prompt(
a_ , a_ , a_ , a_ , a_ )
if isinstance(a_ , a_ ):
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cat(a_ , dim=0 )
if isinstance(a_ , a_ ):
SCREAMING_SNAKE_CASE__ : int = torch.cat(a_ , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__ : Optional[int] = image_embeds.repeat_interleave(a_ , dim=0 )
SCREAMING_SNAKE_CASE__ : int = negative_image_embeds.repeat_interleave(a_ , dim=0 )
SCREAMING_SNAKE_CASE__ : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=a_ )
self.scheduler.set_timesteps(a_ , device=a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = self.scheduler.timesteps
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.unet.config.in_channels
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = get_new_h_w(a_ , a_ , self.movq_scale_factor )
# create initial latent
SCREAMING_SNAKE_CASE__ : Tuple = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , a_ , a_ , a_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(a_ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE__ : str = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
SCREAMING_SNAKE_CASE__ : int = self.unet(
sample=a_ , timestep=a_ , encoder_hidden_states=a_ , added_cond_kwargs=a_ , return_dict=a_ , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE__ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE__ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE__ : List[Any] = self.scheduler.step(
a_ , a_ , a_ , generator=a_ , ).prev_sample
# post-processing
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.movq.decode(a_ , force_not_quantize=a_ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE__ : List[str] = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE__ : Dict = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
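# A minimal sketch of the classifier-free-guidance combination used inside the denoising loop
# above: the unconditional and text-conditioned noise predictions are blended with the guidance
# scale before the scheduler step. The tensors in the check are illustrative.
import torch

def _cfg_blend_sketch(noise_uncond: torch.Tensor, noise_text: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    return noise_uncond + guidance_scale * (noise_text - noise_uncond)

assert torch.allclose(_cfg_blend_sketch(torch.zeros(2), torch.ones(2), 4.0), torch.full((2,), 4.0))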
| 636 | import requests
SCREAMING_SNAKE_CASE__ : int = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(f'''{i}.) {article['title']}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
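# A hedged usage sketch: in practice the key would come from the environment rather than a
# literal. The variable name "BBC_NEWS_API_KEY" is illustrative and not defined by the script.
import os

_bbc_key = os.environ.get("BBC_NEWS_API_KEY")
if _bbc_key:
    fetch_bbc_news(bbc_news_api_key=_bbc_key)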
| 636 | 1 |
from __future__ import annotations
def _a ( lowercase__ : list , lowercase__ : int | None = None , lowercase__ : int | None = None ):
'''simple docstring'''
if start is None:
SCREAMING_SNAKE_CASE__ : Dict = 0
if end is None:
SCREAMING_SNAKE_CASE__ : int = len(lowercase__ ) - 1
if start >= end:
return
SCREAMING_SNAKE_CASE__ : Tuple = (start + end) // 2
slowsort(lowercase__ , lowercase__ , lowercase__ )
slowsort(lowercase__ , mid + 1 , lowercase__ )
if sequence[end] < sequence[mid]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = sequence[mid], sequence[end]
slowsort(lowercase__ , lowercase__ , end - 1 )
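# A self-contained restatement of the same recursion with a quick check, kept separate from the
# helper above so it can run on its own; the sample list is illustrative.
def _slowsort_sketch(seq: list, start: int, end: int) -> None:
    if start >= end:
        return
    mid = (start + end) // 2
    _slowsort_sketch(seq, start, mid)        # sort the first half
    _slowsort_sketch(seq, mid + 1, end)      # sort the second half
    if seq[end] < seq[mid]:                  # move the larger of the two maxima to the end
        seq[mid], seq[end] = seq[end], seq[mid]
    _slowsort_sketch(seq, start, end - 1)    # sort everything except the (now largest) last item

_data = [7, 3, 5, 1, 9]
_slowsort_sketch(_data, 0, len(_data) - 1)
assert _data == [1, 3, 5, 7, 9]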
if __name__ == "__main__":
from doctest import testmod
testmod()
| 636 | import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger()
@dataclass
class snake_case :
lowercase_ = 42
lowercase_ = field(default_factory=UpperCamelCase_ )
lowercase_ = field(default_factory=UpperCamelCase_ )
def __lowercase( self : Dict , a_ : Dict , a_ : Tensor , a_ : Tensor )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = len(list(m.modules() ) ) == 1 or isinstance(a_ , nn.Convad ) or isinstance(a_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(a_ )
def __call__( self : Tuple , a_ : Tensor )-> Any:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(a_ )
[x.remove() for x in self.handles]
return self
@property
def __lowercase( self : Tuple )-> int:
"""simple docstring"""
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda a_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
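# A minimal sketch of the forward-hook tracing idea used by `Tracker` above: register a hook on
# every module, run one forward pass, and keep only the leaf modules that actually executed.
# The tiny Sequential model at the bottom is purely illustrative.
import torch
import torch.nn as nn

def _trace_leaf_modules_sketch(model: nn.Module, example: torch.Tensor) -> list:
    traced, handles = [], []

    def _hook(module, inputs, output):
        if len(list(module.children())) == 0:  # record only leaf modules
            traced.append(module)

    for m in model.modules():
        handles.append(m.register_forward_hook(_hook))
    with torch.no_grad():
        model(example)
    for h in handles:
        h.remove()
    return traced

_leaves = _trace_leaf_modules_sketch(nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU()), torch.randn(1, 3, 32, 32))
assert len(_leaves) == 2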
@dataclass
class snake_case :
lowercase_ = 42
lowercase_ = 42
lowercase_ = 1
lowercase_ = field(default_factory=UpperCamelCase_ )
lowercase_ = field(default_factory=UpperCamelCase_ )
lowercase_ = True
def __call__( self : List[Any] , a_ : Tensor )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = Tracker(self.dest )(a_ ).parametrized
SCREAMING_SNAKE_CASE__ : Optional[int] = Tracker(self.src )(a_ ).parametrized
SCREAMING_SNAKE_CASE__ : List[str] = list(filter(lambda a_ : type(a_ ) not in self.src_skip , a_ ) )
SCREAMING_SNAKE_CASE__ : Dict = list(filter(lambda a_ : type(a_ ) not in self.dest_skip , a_ ) )
if len(a_ ) != len(a_ ) and self.raise_if_mismatch:
raise Exception(
F'''Numbers of operations are different. Source module has {len(a_ )} operations while'''
F''' destination module has {len(a_ )}.''' )
for dest_m, src_m in zip(a_ , a_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'''Transfered from={src_m} to={dest_m}''' )
class snake_case ( nn.Module ):
def __init__( self : List[Any] , a_ : nn.Module )-> Dict:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(('conv1', model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('block' ), F'''Unexpected layer name {k}'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(a_ ) + 1
feature_blocks.append((F'''res{block_index}''', v) )
SCREAMING_SNAKE_CASE__ : Any = nn.ModuleDict(a_ )
def __lowercase( self : Tuple , a_ : Tensor )-> Dict:
"""simple docstring"""
return get_trunk_forward_outputs(
a_ , out_feat_keys=a_ , feature_blocks=self._feature_blocks , )
class snake_case ( UpperCamelCase_ ):
def __lowercase( self : Optional[Any] , a_ : str )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = x.split('-' )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self : Union[str, Any] , a_ : str )-> Callable[[], Tuple[nn.Module, Dict]]:
"""simple docstring"""
# default to timm!
if x not in self:
SCREAMING_SNAKE_CASE__ : Any = self.convert_name_to_timm(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = partial(lambda: (timm.create_model(a_ , pretrained=a_ ).eval(), None) )
else:
SCREAMING_SNAKE_CASE__ : List[str] = super().__getitem__(a_ )
return val
class snake_case ( UpperCamelCase_ ):
def __getitem__( self : Any , a_ : str )-> Callable[[], nn.Module]:
"""simple docstring"""
if "seer" in x and "in1k" not in x:
SCREAMING_SNAKE_CASE__ : Any = RegNetModel
else:
SCREAMING_SNAKE_CASE__ : Any = RegNetForImageClassification
return val
def _a ( lowercase__ : Any , lowercase__ : Optional[Any] , lowercase__ : List[Tuple[str, str]] ):
'''simple docstring'''
for from_key, to_key in keys:
SCREAMING_SNAKE_CASE__ : Tuple = from_state_dict[from_key].clone()
print(f'''Copied key={from_key} to={to_key}''' )
return to_state_dict
def _a ( lowercase__ : str , lowercase__ : Callable[[], nn.Module] , lowercase__ : Callable[[], nn.Module] , lowercase__ : RegNetConfig , lowercase__ : Path , lowercase__ : bool = True , ):
'''simple docstring'''
print(f'''Converting {name}...''' )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = from_model_func()
SCREAMING_SNAKE_CASE__ : int = our_model_func(lowercase__ ).eval()
SCREAMING_SNAKE_CASE__ : List[Any] = ModuleTransfer(src=lowercase__ , dest=lowercase__ , raise_if_mismatch=lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.randn((1, 3, 2_24, 2_24) )
module_transfer(lowercase__ )
if from_state_dict is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
SCREAMING_SNAKE_CASE__ : int = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
SCREAMING_SNAKE_CASE__ : Optional[Any] = manually_copy_vissl_head(lowercase__ , our_model.state_dict() , lowercase__ )
our_model.load_state_dict(lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = our_model(lowercase__ , output_hidden_states=lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = (
our_outputs.logits if isinstance(lowercase__ , lowercase__ ) else our_outputs.last_hidden_state
)
SCREAMING_SNAKE_CASE__ : List[Any] = from_model(lowercase__ )
SCREAMING_SNAKE_CASE__ : List[str] = from_output[-1] if type(lowercase__ ) is list else from_output
    # now since I don't want to use any config files, vissl seer model doesn't actually have a head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
SCREAMING_SNAKE_CASE__ : List[Any] = our_outputs.hidden_states[-1]
assert torch.allclose(lowercase__ , lowercase__ ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add model' , use_temp_dir=lowercase__ , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 2_24 if 'seer' not in name else 3_84
# we can use the convnext one
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' , size=lowercase__ )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add image processor' , use_temp_dir=lowercase__ , )
print(f'''Pushed {name}''' )
def _a ( lowercase__ : Path , lowercase__ : str = None , lowercase__ : bool = True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = 'imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE__ : Tuple = 10_00
SCREAMING_SNAKE_CASE__ : Tuple = (1, num_labels)
SCREAMING_SNAKE_CASE__ : str = 'huggingface/label-files'
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE__ : List[str] = json.load(open(cached_download(hf_hub_url(lowercase__ , lowercase__ , repo_type='dataset' ) ) , 'r' ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {int(lowercase__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : str = idalabel
SCREAMING_SNAKE_CASE__ : Tuple = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Any = partial(lowercase__ , num_labels=lowercase__ , idalabel=lowercase__ , labelaid=lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 , layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 1_60, 3_84] , groups_width=16 , layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 2_40, 5_28] , groups_width=24 , layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 1_28, 2_88, 6_72] , groups_width=16 , layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 1_68, 4_08, 9_12] , groups_width=24 , layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 1_92, 4_32, 10_08] , groups_width=48 , layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 2_40, 5_60, 13_60] , groups_width=40 , layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 3_92, 7_84, 16_24] , groups_width=56 , layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 2_40, 7_20, 19_20] , groups_width=1_20 , layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 , layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[2_56, 5_12, 8_96, 20_48] , groups_width=1_28 , layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[3_36, 6_72, 13_44, 25_20] , groups_width=1_68 , layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 1_04, 2_08, 4_40] , groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 1_12, 2_56, 6_08] , groups_width=16 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 1_28, 3_20, 7_68] , groups_width=16 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 1_20, 3_36, 8_88] , groups_width=24 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 2_16, 5_76, 15_12] , groups_width=24 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[1_28, 1_92, 5_12, 10_88] , groups_width=64 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[1_44, 2_88, 5_76, 12_96] , groups_width=72 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 4_48, 8_96, 20_16] , groups_width=56 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[2_24, 4_48, 12_32, 30_24] , groups_width=1_12 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
}
SCREAMING_SNAKE_CASE__ : List[Any] = NameToOurModelFuncMap()
SCREAMING_SNAKE_CASE__ : Dict = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(lowercase__ : str , lowercase__ : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
SCREAMING_SNAKE_CASE__ : str = torch.hub.load_state_dict_from_url(lowercase__ , model_dir=str(lowercase__ ) , map_location='cpu' )
SCREAMING_SNAKE_CASE__ : Tuple = model_func()
# check if we have a head, if yes add it
SCREAMING_SNAKE_CASE__ : str = files['classy_state_dict']['base_model']['model']
SCREAMING_SNAKE_CASE__ : str = model_state_dict['trunk']
model.load_state_dict(lowercase__ )
return model.eval(), model_state_dict["heads"]
# pretrained
SCREAMING_SNAKE_CASE__ : Any = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE__ : int = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE__ : List[Any] = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
SCREAMING_SNAKE_CASE__ : Optional[int] = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch' , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=10_10 , w_0=17_44 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
SCREAMING_SNAKE_CASE__ : List[Any] = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE__ : Optional[int] = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
SCREAMING_SNAKE_CASE__ : Any = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch' , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=10_10 , w_0=17_44 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
lowercase__ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , lowercase__ , lowercase__ , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
lowercase__ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , lowercase__ , lowercase__ , lowercase__ , )
return config, expected_shape
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
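# A hedged follow-up sketch of consuming one of the converted checkpoints through the
# Transformers API. "facebook/regnet-y-040" is used as an illustrative hub id; any of the
# converted names above would work the same way. The function is defined only, not executed.
def _load_converted_regnet_sketch(repo_id: str = "facebook/regnet-y-040"):
    from transformers import AutoImageProcessor, RegNetForImageClassification

    image_processor = AutoImageProcessor.from_pretrained(repo_id)
    model = RegNetForImageClassification.from_pretrained(repo_id)
    return image_processor, model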
| 636 | 1 |
from math import pow
def _a ( lowercase__ : int , lowercase__ : int , lowercase__ : int , lowercase__ : int , lowercase__ : int , ):
'''simple docstring'''
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(pow(lowercase__ , lowercase__ ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = backtrack(
lowercase__ , lowercase__ , current_number + 1 , lowercase__ , lowercase__ )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = backtrack(
lowercase__ , lowercase__ , current_number + 1 , lowercase__ , lowercase__ )
return current_sum, solutions_count
def _a ( lowercase__ : int , lowercase__ : int ):
'''simple docstring'''
if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10):
raise ValueError(
'Invalid input\n'
'needed_sum must be between 1 and 1000, power between 2 and 10.' )
return backtrack(lowercase__ , lowercase__ , 1 , 0 , 0 )[1] # Return the solutions_count
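# A small, self-contained cross-check of the counting problem above: enumerate subsets of
# {1**p, 2**p, ...} directly and count those that sum to the target. The test values are
# illustrative.
from itertools import combinations

def _count_power_sums_bruteforce(needed_sum: int, power: int) -> int:
    terms, i = [], 1
    while i**power <= needed_sum:
        terms.append(i**power)
        i += 1
    return sum(
        1
        for r in range(1, len(terms) + 1)
        for combo in combinations(terms, r)
        if sum(combo) == needed_sum
    )

assert _count_power_sums_bruteforce(13, 2) == 1  # 13 = 2**2 + 3**2
assert _count_power_sums_bruteforce(100, 2) == 3  # 10**2, 6**2 + 8**2, 1 + 9 + 16 + 25 + 49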
if __name__ == "__main__":
import doctest
doctest.testmod()
| 636 | import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class snake_case ( UpperCamelCase_ ):
lowercase_ = ['image_processor', 'tokenizer']
lowercase_ = 'OwlViTImageProcessor'
lowercase_ = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : List[str] , a_ : List[Any]=None , a_ : str=None , **a_ : Any )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , a_ , )
SCREAMING_SNAKE_CASE__ : Tuple = kwargs.pop('feature_extractor' )
SCREAMING_SNAKE_CASE__ : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(a_ , a_ )
def __call__( self : Any , a_ : Optional[int]=None , a_ : Tuple=None , a_ : List[Any]=None , a_ : Tuple="max_length" , a_ : str="np" , **a_ : Any )-> int:
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(a_ , a_ ) or (isinstance(a_ , a_ ) and not isinstance(text[0] , a_ )):
SCREAMING_SNAKE_CASE__ : Tuple = [self.tokenizer(a_ , padding=a_ , return_tensors=a_ , **a_ )]
elif isinstance(a_ , a_ ) and isinstance(text[0] , a_ ):
SCREAMING_SNAKE_CASE__ : Any = []
# Maximum number of queries across batch
SCREAMING_SNAKE_CASE__ : str = max([len(a_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(a_ ) != max_num_queries:
SCREAMING_SNAKE_CASE__ : Tuple = t + [' '] * (max_num_queries - len(a_ ))
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer(a_ , padding=a_ , return_tensors=a_ , **a_ )
encodings.append(a_ )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
SCREAMING_SNAKE_CASE__ : Dict = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ : List[Any] = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ : int = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
SCREAMING_SNAKE_CASE__ : str = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ : Dict = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
SCREAMING_SNAKE_CASE__ : Optional[int] = BatchEncoding()
SCREAMING_SNAKE_CASE__ : List[str] = input_ids
SCREAMING_SNAKE_CASE__ : Tuple = attention_mask
if query_images is not None:
SCREAMING_SNAKE_CASE__ : Any = BatchEncoding()
SCREAMING_SNAKE_CASE__ : Dict = self.image_processor(
a_ , return_tensors=a_ , **a_ ).pixel_values
SCREAMING_SNAKE_CASE__ : Dict = query_pixel_values
if images is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor(a_ , return_tensors=a_ , **a_ )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ : Dict = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def __lowercase( self : str , *a_ : List[str] , **a_ : int )-> List[Any]:
"""simple docstring"""
return self.image_processor.post_process(*a_ , **a_ )
def __lowercase( self : Tuple , *a_ : List[str] , **a_ : str )-> Union[str, Any]:
"""simple docstring"""
return self.image_processor.post_process_object_detection(*a_ , **a_ )
def __lowercase( self : Optional[Any] , *a_ : str , **a_ : Dict )-> Optional[int]:
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*a_ , **a_ )
def __lowercase( self : Optional[int] , *a_ : Tuple , **a_ : Tuple )-> Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*a_ , **a_ )
def __lowercase( self : Tuple , *a_ : Tuple , **a_ : Tuple )-> List[str]:
"""simple docstring"""
return self.tokenizer.decode(*a_ , **a_ )
@property
def __lowercase( self : Tuple )-> Any:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , a_ , )
return self.image_processor_class
@property
def __lowercase( self : List[Any] )-> List[str]:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , a_ , )
return self.image_processor
| 636 | 1 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def _a ( lowercase__ : Tuple ): # picklable for multiprocessing
'''simple docstring'''
    return lowercase__ + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def _a ( ):
'''simple docstring'''
with parallel_backend('spark' ):
assert ParallelBackendConfig.backend_name == "spark"
SCREAMING_SNAKE_CASE__ : int = [1, 2, 3]
with pytest.raises(lowercase__ ):
with parallel_backend('unsupported backend' ):
map_nested(lowercase__ , lowercase__ , num_proc=2 )
with pytest.raises(lowercase__ ):
with parallel_backend('unsupported backend' ):
map_nested(lowercase__ , lowercase__ , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' , [2, -1] )
def _a ( lowercase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = [1, 2]
SCREAMING_SNAKE_CASE__ : Any = {'a': 1, 'b': 2}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'a': [1, 2], 'b': [3, 4]}
SCREAMING_SNAKE_CASE__ : Optional[int] = {'a': {'1': 1}, 'b': 2}
SCREAMING_SNAKE_CASE__ : List[str] = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
SCREAMING_SNAKE_CASE__ : List[str] = [2, 3]
SCREAMING_SNAKE_CASE__ : Dict = {'a': 2, 'b': 3}
SCREAMING_SNAKE_CASE__ : List[Any] = {'a': [2, 3], 'b': [4, 5]}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'a': {'1': 2}, 'b': 3}
SCREAMING_SNAKE_CASE__ : int = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
with parallel_backend('spark' ):
assert map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) == expected_map_nested_sa
assert map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) == expected_map_nested_sa
assert map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) == expected_map_nested_sa
assert map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) == expected_map_nested_sa
assert map_nested(lowercase__ , lowercase__ , num_proc=lowercase__ ) == expected_map_nested_sa
| 636 | class snake_case ( UpperCamelCase_ ):
pass
class snake_case ( UpperCamelCase_ ):
pass
class snake_case :
def __init__( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = [
[],
[],
[],
]
def __lowercase( self : int , a_ : int , a_ : int )-> None:
"""simple docstring"""
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError('Maximum queue size is 100' )
self.queues[priority].append(a_ )
except IndexError:
raise ValueError('Valid priorities are 0, 1, and 2' )
def __lowercase( self : int )-> int:
"""simple docstring"""
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self : Any )-> str:
"""simple docstring"""
return "\n".join(F'''Priority {i}: {q}''' for i, q in enumerate(self.queues ) )
class snake_case :
def __init__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = []
def __lowercase( self : List[str] , a_ : int )-> None:
"""simple docstring"""
if len(self.queue ) == 100:
raise OverFlowError('Maximum queue size is 100' )
self.queue.append(a_ )
def __lowercase( self : int )-> int:
"""simple docstring"""
if not self.queue:
raise UnderFlowError('The queue is empty' )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = min(self.queue )
self.queue.remove(a_ )
return data
def __str__( self : List[str] )-> str:
"""simple docstring"""
return str(self.queue )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
print(lowercase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(lowercase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
print(lowercase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(lowercase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 636 | 1 |
def is_ip_va_address_valid( ip_va_address : str ):
    '''simple docstring'''
    octets = [int(i ) for i in ip_va_address.split('.' ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= octet <= 2_54 for octet in octets )
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_va_address_valid(ip) else "invalid"
print(F"""{ip} is a {valid_or_invalid} IP v4 address.""")
| 636 | from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _a ( lowercase__ : List[str] ):
'''simple docstring'''
if not is_accelerate_available():
return method
SCREAMING_SNAKE_CASE__ : str = version.parse(accelerate.__version__ ).base_version
if version.parse(lowercase__ ) < version.parse('0.17.0' ):
return method
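    # Otherwise, wrap the method so that an attached accelerate hook runs its pre_forward before the original call.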
def wrapper(self : Optional[int] , *lowercase__ : int , **lowercase__ : Tuple ):
if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
self._hf_hook.pre_forward(self )
return method(self , *lowercase__ , **lowercase__ )
return wrapper
| 636 | 1 |
def _a ( min_val : int = 10 , max_val : int = 10_00 , option : bool = True ):
    '''simple docstring'''
    assert (
        isinstance(min_val , int )
        and isinstance(max_val , int )
        and isinstance(option , bool )
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError('Invalid value for min_val or max_val (min_value < max_value)' )
    return min_val if option else max_val
def get_avg( number_a : int , number_b : int ):
    '''simple docstring'''
    return int((number_a + number_b) / 2 )
def guess_the_number( lower : int , higher : int , to_guess : int ):
    '''simple docstring'''
    assert (
        isinstance(lower , int ) and isinstance(higher , int ) and isinstance(to_guess , int )
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError('argument values for lower and higher must satisfy lower <= higher' )
    if not lower < to_guess < higher:
        raise ValueError(
            'guess value must be within the range of lower and higher value' )
    def answer(number : int ) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"
    print('started...' )
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest , last_highest )
        last_numbers.append(number )
        if answer(number ) == "low":
            last_lowest = number
        elif answer(number ) == "high":
            last_highest = number
        else:
            break
    print(f'''guess the number : {last_numbers[-1]}''' )
    print(f'''details : {last_numbers!s}''' )
def main( ):
    '''simple docstring'''
    lower = int(input('Enter lower value : ' ).strip() )
    higher = int(input('Enter high value : ' ).strip() )
    to_guess = int(input('Enter value to guess : ' ).strip() )
    guess_the_number(lower , higher , to_guess )
if __name__ == "__main__":
main()
| 636 | import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _a ( lowercase__ : int ):
'''simple docstring'''
if is_torch_version('<' , '2.0.0' ) or not hasattr(lowercase__ , '_dynamo' ):
return False
return isinstance(lowercase__ , torch._dynamo.eval_frame.OptimizedModule )
def _a ( lowercase__ : Optional[Any] , lowercase__ : bool = True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
SCREAMING_SNAKE_CASE__ : Dict = is_compiled_module(lowercase__ )
if is_compiled:
SCREAMING_SNAKE_CASE__ : Tuple = model
SCREAMING_SNAKE_CASE__ : int = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE__ : Any = model.module
if not keep_fpaa_wrapper:
SCREAMING_SNAKE_CASE__ : List[Any] = getattr(lowercase__ , 'forward' )
SCREAMING_SNAKE_CASE__ : str = model.__dict__.pop('_original_forward' , lowercase__ )
if original_forward is not None:
while hasattr(lowercase__ , '__wrapped__' ):
SCREAMING_SNAKE_CASE__ : Dict = forward.__wrapped__
if forward == original_forward:
break
SCREAMING_SNAKE_CASE__ : Dict = forward
if getattr(lowercase__ , '_converted_to_transformer_engine' , lowercase__ ):
convert_model(lowercase__ , to_transformer_engine=lowercase__ )
if is_compiled:
SCREAMING_SNAKE_CASE__ : List[Any] = model
SCREAMING_SNAKE_CASE__ : Optional[Any] = compiled_model
return model
def _a ( ):
'''simple docstring'''
PartialState().wait_for_everyone()
def _a ( lowercase__ : str , lowercase__ : Optional[Any] ):
'''simple docstring'''
if PartialState().distributed_type == DistributedType.TPU:
xm.save(lowercase__ , lowercase__ )
elif PartialState().local_process_index == 0:
torch.save(lowercase__ , lowercase__ )
@contextmanager
def _a ( **lowercase__ : str ):
'''simple docstring'''
for key, value in kwargs.items():
SCREAMING_SNAKE_CASE__ : int = str(lowercase__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _a ( lowercase__ : Optional[Any] ):
'''simple docstring'''
if not hasattr(lowercase__ , '__qualname__' ) and not hasattr(lowercase__ , '__name__' ):
SCREAMING_SNAKE_CASE__ : Any = getattr(lowercase__ , '__class__' , lowercase__ )
if hasattr(lowercase__ , '__qualname__' ):
return obj.__qualname__
if hasattr(lowercase__ , '__name__' ):
return obj.__name__
return str(lowercase__ )
def _a ( lowercase__ : List[str] , lowercase__ : List[Any] ):
'''simple docstring'''
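    # Recursively merge the source mapping into the destination, descending into nested dictionaries.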
for key, value in source.items():
if isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE__ : List[str] = destination.setdefault(lowercase__ , {} )
merge_dicts(lowercase__ , lowercase__ )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = value
return destination
def _a ( lowercase__ : int = None ):
'''simple docstring'''
if port is None:
SCREAMING_SNAKE_CASE__ : int = 2_95_00
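    # connect_ex returns 0 when the connection succeeds, i.e. something is already listening on the port.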
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
| 636 | 1 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _a ( lowercase__ : Dict , lowercase__ : List[str]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if token is not None:
SCREAMING_SNAKE_CASE__ : Tuple = {'Accept': 'application/vnd.github+json', 'Authorization': f'''Bearer {token}'''}
SCREAMING_SNAKE_CASE__ : Any = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
SCREAMING_SNAKE_CASE__ : List[str] = requests.get(lowercase__ , headers=lowercase__ ).json()
SCREAMING_SNAKE_CASE__ : List[Any] = {}
try:
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
SCREAMING_SNAKE_CASE__ : Any = math.ceil((result['total_count'] - 1_00) / 1_00 )
for i in range(lowercase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = requests.get(url + f'''&page={i + 2}''' , headers=lowercase__ ).json()
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
return job_links
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def _a ( lowercase__ : List[str] , lowercase__ : List[Any]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if token is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = {'Accept': 'application/vnd.github+json', 'Authorization': f'''Bearer {token}'''}
SCREAMING_SNAKE_CASE__ : Dict = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'''
SCREAMING_SNAKE_CASE__ : Optional[int] = requests.get(lowercase__ , headers=lowercase__ ).json()
SCREAMING_SNAKE_CASE__ : Optional[int] = {}
try:
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
SCREAMING_SNAKE_CASE__ : str = math.ceil((result['total_count'] - 1_00) / 1_00 )
for i in range(lowercase__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = requests.get(url + f'''&page={i + 2}''' , headers=lowercase__ ).json()
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
return artifacts
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def _a ( lowercase__ : List[Any] , lowercase__ : List[Any] , lowercase__ : Any , lowercase__ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = None
if token is not None:
SCREAMING_SNAKE_CASE__ : Dict = {'Accept': 'application/vnd.github+json', 'Authorization': f'''Bearer {token}'''}
SCREAMING_SNAKE_CASE__ : Dict = requests.get(lowercase__ , headers=lowercase__ , allow_redirects=lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = result.headers['Location']
SCREAMING_SNAKE_CASE__ : Tuple = requests.get(lowercase__ , allow_redirects=lowercase__ )
SCREAMING_SNAKE_CASE__ : Any = os.path.join(lowercase__ , f'''{artifact_name}.zip''' )
with open(lowercase__ , 'wb' ) as fp:
fp.write(response.content )
def _a ( lowercase__ : Optional[Any] , lowercase__ : Dict=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : int = []
SCREAMING_SNAKE_CASE__ : List[Any] = None
with zipfile.ZipFile(lowercase__ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowercase__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(lowercase__ ) as f:
for line in f:
SCREAMING_SNAKE_CASE__ : List[str] = line.decode('UTF-8' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
SCREAMING_SNAKE_CASE__ : List[str] = line[: line.index(': ' )]
SCREAMING_SNAKE_CASE__ : str = line[line.index(': ' ) + len(': ' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('FAILED ' ):
# `test` is the test method that failed
SCREAMING_SNAKE_CASE__ : List[Any] = line[len('FAILED ' ) :]
failed_tests.append(lowercase__ )
elif filename == "job_name.txt":
SCREAMING_SNAKE_CASE__ : List[Any] = line
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError(
f'''`errors` and `failed_tests` should have the same number of elements. Got {len(lowercase__ )} for `errors` '''
f'''and {len(lowercase__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
' problem.' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if job_name and job_links:
SCREAMING_SNAKE_CASE__ : Any = job_links.get(lowercase__ , lowercase__ )
# A list with elements of the form (line of error, error, failed test)
SCREAMING_SNAKE_CASE__ : Optional[int] = [x + [y] + [job_link] for x, y in zip(lowercase__ , lowercase__ )]
return result
def _a ( lowercase__ : Union[str, Any] , lowercase__ : Any=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : str = [os.path.join(lowercase__ , lowercase__ ) for p in os.listdir(lowercase__ ) if p.endswith('.zip' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(lowercase__ , job_links=lowercase__ ) )
return errors
def _a ( lowercase__ : Union[str, Any] , lowercase__ : Tuple=None ):
'''simple docstring'''
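    # Group the collected failures by error message and sort the groups by how often each error occurred.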
SCREAMING_SNAKE_CASE__ : List[Any] = Counter()
counter.update([x[1] for x in logs] )
SCREAMING_SNAKE_CASE__ : List[Any] = counter.most_common()
SCREAMING_SNAKE_CASE__ : Any = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
SCREAMING_SNAKE_CASE__ : Tuple = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
SCREAMING_SNAKE_CASE__ : Optional[int] = dict(sorted(r.items() , key=lambda lowercase__ : item[1]["count"] , reverse=lowercase__ ) )
return r
def _a ( lowercase__ : Tuple ):
'''simple docstring'''
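    # A failed test looks like `tests/models/<model>/test_modeling_x.py::Class::test_name`; keep only the model folder name.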
SCREAMING_SNAKE_CASE__ : Dict = test.split('::' )[0]
if test.startswith('tests/models/' ):
SCREAMING_SNAKE_CASE__ : int = test.split('/' )[2]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
return test
def _a ( lowercase__ : Union[str, Any] , lowercase__ : Optional[Any]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = [(x[0], x[1], get_model(x[2] )) for x in logs]
SCREAMING_SNAKE_CASE__ : List[Any] = [x for x in logs if x[2] is not None]
SCREAMING_SNAKE_CASE__ : int = {x[2] for x in logs}
SCREAMING_SNAKE_CASE__ : List[str] = {}
for test in tests:
SCREAMING_SNAKE_CASE__ : Optional[int] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
SCREAMING_SNAKE_CASE__ : Optional[int] = counter.most_common()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
SCREAMING_SNAKE_CASE__ : Dict = sum(error_counts.values() )
if n_errors > 0:
SCREAMING_SNAKE_CASE__ : List[Any] = {'count': n_errors, 'errors': error_counts}
SCREAMING_SNAKE_CASE__ : Tuple = dict(sorted(r.items() , key=lambda lowercase__ : item[1]["count"] , reverse=lowercase__ ) )
return r
def _a ( lowercase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '| no. | error | status |'
SCREAMING_SNAKE_CASE__ : Dict = '|-:|:-|:-|'
SCREAMING_SNAKE_CASE__ : int = [header, sep]
for error in reduced_by_error:
SCREAMING_SNAKE_CASE__ : Optional[int] = reduced_by_error[error]['count']
SCREAMING_SNAKE_CASE__ : List[str] = f'''| {count} | {error[:1_00]} | |'''
lines.append(lowercase__ )
return "\n".join(lowercase__ )
def _a ( lowercase__ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = '| model | no. of errors | major error | count |'
SCREAMING_SNAKE_CASE__ : Tuple = '|-:|-:|-:|-:|'
SCREAMING_SNAKE_CASE__ : Any = [header, sep]
for model in reduced_by_model:
SCREAMING_SNAKE_CASE__ : List[str] = reduced_by_model[model]['count']
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = list(reduced_by_model[model]['errors'].items() )[0]
SCREAMING_SNAKE_CASE__ : Tuple = f'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(lowercase__ )
return "\n".join(lowercase__ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_job_links(args.workflow_run_id, token=args.token)
SCREAMING_SNAKE_CASE__ : Any = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
SCREAMING_SNAKE_CASE__ : List[str] = k.find(" / ")
SCREAMING_SNAKE_CASE__ : Optional[int] = k[index + len(" / ") :]
SCREAMING_SNAKE_CASE__ : str = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
SCREAMING_SNAKE_CASE__ : Any = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
SCREAMING_SNAKE_CASE__ : Dict = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
SCREAMING_SNAKE_CASE__ : Tuple = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
SCREAMING_SNAKE_CASE__ : Any = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
SCREAMING_SNAKE_CASE__ : List[Any] = reduce_by_error(errors)
SCREAMING_SNAKE_CASE__ : Dict = reduce_by_model(errors)
SCREAMING_SNAKE_CASE__ : Any = make_github_table(reduced_by_error)
SCREAMING_SNAKE_CASE__ : Optional[Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 636 | from __future__ import annotations
def find_max( nums : list[int | float] , left : int , right : int ):
    '''simple docstring'''
    if len(nums ) == 0:
        raise ValueError('find_max() arg is an empty sequence' )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
        raise IndexError('list index out of range' )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums , left , mid )  # find max in range[left, mid]
    right_max = find_max(nums , mid + 1 , right )  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 636 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Any = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
SCREAMING_SNAKE_CASE__ : Tuple = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def _a ( lowercase__ : str , lowercase__ : Dict , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : str ):
'''simple docstring'''
for attribute in key.split('.' ):
SCREAMING_SNAKE_CASE__ : List[Any] = getattr(lowercase__ , lowercase__ )
if weight_type is not None:
SCREAMING_SNAKE_CASE__ : str = getattr(lowercase__ , lowercase__ ).shape
else:
SCREAMING_SNAKE_CASE__ : List[str] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
SCREAMING_SNAKE_CASE__ : int = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE__ : Tuple = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE__ : Tuple = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE__ : List[Any] = value
elif weight_type == "running_mean":
SCREAMING_SNAKE_CASE__ : Any = value
elif weight_type == "running_var":
SCREAMING_SNAKE_CASE__ : Optional[int] = value
elif weight_type == "num_batches_tracked":
SCREAMING_SNAKE_CASE__ : Tuple = value
elif weight_type == "inv_freq":
SCREAMING_SNAKE_CASE__ : Dict = value
else:
SCREAMING_SNAKE_CASE__ : Any = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _a ( lowercase__ : int , lowercase__ : Union[str, Any] , lowercase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = []
SCREAMING_SNAKE_CASE__ : List[str] = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE__ : int = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
if "conv_layers" in name:
load_conv_layer(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , hf_model.config.feat_extract_norm == 'group' , )
SCREAMING_SNAKE_CASE__ : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
SCREAMING_SNAKE_CASE__ : int = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE__ : Tuple = name.split(lowercase__ )[0].split('.' )[-2]
SCREAMING_SNAKE_CASE__ : Optional[Any] = mapped_key.replace('*' , lowercase__ )
if "pos_bias_u" in name:
SCREAMING_SNAKE_CASE__ : List[Any] = None
elif "pos_bias_v" in name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
elif "weight_g" in name:
SCREAMING_SNAKE_CASE__ : Dict = 'weight_g'
elif "weight_v" in name:
SCREAMING_SNAKE_CASE__ : List[Any] = 'weight_v'
elif "bias" in name:
SCREAMING_SNAKE_CASE__ : Any = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'weight'
elif "running_mean" in name:
SCREAMING_SNAKE_CASE__ : int = 'running_mean'
elif "inv_freq" in name:
SCREAMING_SNAKE_CASE__ : List[Any] = 'inv_freq'
elif "running_var" in name:
SCREAMING_SNAKE_CASE__ : str = 'running_var'
elif "num_batches_tracked" in name:
SCREAMING_SNAKE_CASE__ : List[Any] = 'num_batches_tracked'
else:
SCREAMING_SNAKE_CASE__ : Any = None
set_recursively(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
continue
if not is_used:
unused_weights.append(lowercase__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _a ( lowercase__ : Union[str, Any] , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = full_name.split('conv_layers.' )[-1]
SCREAMING_SNAKE_CASE__ : Dict = name.split('.' )
SCREAMING_SNAKE_CASE__ : List[str] = int(items[0] )
SCREAMING_SNAKE_CASE__ : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
SCREAMING_SNAKE_CASE__ : Dict = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
SCREAMING_SNAKE_CASE__ : List[Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowercase__ )
@torch.no_grad()
def _a ( lowercase__ : List[Any] , lowercase__ : int , lowercase__ : List[Any]=None , lowercase__ : Tuple=None , lowercase__ : List[Any]=True ):
'''simple docstring'''
if config_path is not None:
SCREAMING_SNAKE_CASE__ : str = WavaVecaConformerConfig.from_pretrained(lowercase__ , hidden_act='swish' )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaConformerConfig()
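    # Checkpoints trained with rotary position embeddings are expected to carry "rope" in their path.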
if "rope" in checkpoint_path:
SCREAMING_SNAKE_CASE__ : List[str] = 'rotary'
if is_finetuned:
if dict_path:
SCREAMING_SNAKE_CASE__ : str = Dictionary.load(lowercase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
SCREAMING_SNAKE_CASE__ : Any = target_dict.pad_index
SCREAMING_SNAKE_CASE__ : List[str] = target_dict.bos_index
SCREAMING_SNAKE_CASE__ : str = target_dict.eos_index
SCREAMING_SNAKE_CASE__ : List[Any] = len(target_dict.symbols )
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(lowercase__ , 'vocab.json' )
if not os.path.isdir(lowercase__ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowercase__ ) )
return
os.makedirs(lowercase__ , exist_ok=lowercase__ )
SCREAMING_SNAKE_CASE__ : int = target_dict.indices
# fairseq has the <pad> and <s> switched
SCREAMING_SNAKE_CASE__ : List[Any] = 0
SCREAMING_SNAKE_CASE__ : Any = 1
with open(lowercase__ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaCTCTokenizer(
lowercase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowercase__ , )
SCREAMING_SNAKE_CASE__ : List[str] = True if config.feat_extract_norm == 'layer' else False
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowercase__ , return_attention_mask=lowercase__ , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaProcessor(feature_extractor=lowercase__ , tokenizer=lowercase__ )
processor.save_pretrained(lowercase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaConformerForCTC(lowercase__ )
else:
SCREAMING_SNAKE_CASE__ : str = WavaVecaConformerForPreTraining(lowercase__ )
if is_finetuned:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
SCREAMING_SNAKE_CASE__ : Tuple = argparse.Namespace(task='audio_pretraining' )
SCREAMING_SNAKE_CASE__ : List[str] = fairseq.tasks.setup_task(lowercase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = model[0].eval()
recursively_load_weights(lowercase__ , lowercase__ , not is_finetuned )
hf_wavavec.save_pretrained(lowercase__ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 636 | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def _a ( lowercase__ : Any ):
'''simple docstring'''
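    # Each process builds a distinct, contiguous slice of values so that collective ops (gather, reduce, ...) can be verified.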
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def _a ( lowercase__ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = create_tensor(lowercase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = gather(lowercase__ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def _a ( lowercase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = [state.process_index]
SCREAMING_SNAKE_CASE__ : Any = gather_object(lowercase__ )
assert len(lowercase__ ) == state.num_processes, f'''{gathered_obj}, {len(lowercase__ )} != {state.num_processes}'''
assert gathered_obj == list(range(state.num_processes ) ), f'''{gathered_obj} != {list(range(state.num_processes ) )}'''
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = create_tensor(lowercase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = broadcast(lowercase__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def _a ( lowercase__ : int ):
'''simple docstring'''
if state.is_main_process:
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.arange(state.num_processes + 1 ).to(state.device )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = torch.arange(state.num_processes ).to(state.device )
SCREAMING_SNAKE_CASE__ : Any = pad_across_processes(lowercase__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def _a ( lowercase__ : Optional[Any] ):
'''simple docstring'''
if state.num_processes != 2:
return
SCREAMING_SNAKE_CASE__ : List[Any] = create_tensor(lowercase__ )
SCREAMING_SNAKE_CASE__ : str = reduce(lowercase__ , 'sum' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), f'''{reduced_tensor} != {truth_tensor}'''
def _a ( lowercase__ : int ):
'''simple docstring'''
if state.num_processes != 2:
return
SCREAMING_SNAKE_CASE__ : Any = create_tensor(lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = reduce(lowercase__ , 'mean' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), f'''{reduced_tensor} != {truth_tensor}'''
def _a ( lowercase__ : int ):
'''simple docstring'''
main()
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = PartialState()
state.print(f'''State: {state}''' )
state.print('testing gather' )
test_gather(lowercase__ )
state.print('testing gather_object' )
test_gather_object(lowercase__ )
state.print('testing broadcast' )
test_broadcast(lowercase__ )
state.print('testing pad_across_processes' )
test_pad_across_processes(lowercase__ )
state.print('testing reduce_sum' )
test_reduce_sum(lowercase__ )
state.print('testing reduce_mean' )
test_reduce_mean(lowercase__ )
if __name__ == "__main__":
main()
| 636 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
class snake_case ( UpperCamelCase_ ):
lowercase_ = ['pixel_values']
def __init__( self : Optional[int] , a_ : bool = True , a_ : int = 32 , a_ : Dict=PILImageResampling.BILINEAR , a_ : bool = True , **a_ : int , )-> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Optional[int] = size_divisor
SCREAMING_SNAKE_CASE__ : Optional[int] = resample
super().__init__(**a_ )
def __lowercase( self : Tuple , a_ : np.ndarray , a_ : int , a_ : List[str] , a_ : Optional[ChannelDimension] = None , **a_ : Optional[Any] )-> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = get_image_size(a_ )
# Rounds the height and width down to the closest multiple of size_divisor
SCREAMING_SNAKE_CASE__ : int = height // size_divisor * size_divisor
SCREAMING_SNAKE_CASE__ : Optional[Any] = width // size_divisor * size_divisor
SCREAMING_SNAKE_CASE__ : int = resize(a_ , (new_h, new_w) , resample=a_ , data_format=a_ , **a_ )
return image
def __lowercase( self : Any , a_ : np.ndarray , a_ : float , a_ : Optional[ChannelDimension] = None , **a_ : Union[str, Any] )-> np.ndarray:
"""simple docstring"""
return rescale(image=a_ , scale=a_ , data_format=a_ , **a_ )
def __lowercase( self : List[Any] , a_ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , a_ : Optional[bool] = None , a_ : Optional[int] = None , a_ : Any=None , a_ : Optional[bool] = None , a_ : Optional[Union[TensorType, str]] = None , a_ : ChannelDimension = ChannelDimension.FIRST , **a_ : Tuple , )-> BatchFeature:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : Dict = size_divisor if size_divisor is not None else self.size_divisor
SCREAMING_SNAKE_CASE__ : List[Any] = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
SCREAMING_SNAKE_CASE__ : Any = make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : str = [to_numpy_array(a_ ) for img in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Any = [self.resize(a_ , size_divisor=a_ , resample=a_ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.rescale(a_ , scale=1 / 255 ) for image in images]
SCREAMING_SNAKE_CASE__ : str = [to_channel_dimension_format(a_ , a_ ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[Any] = {'pixel_values': images}
return BatchFeature(data=a_ , tensor_type=a_ )
| 636 | import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
SCREAMING_SNAKE_CASE__ : Any = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class snake_case ( unittest.TestCase ):
def __init__( self : List[Any] , a_ : Optional[int] , a_ : Dict=7 , a_ : Any=3 , a_ : Any=18 , a_ : int=30 , a_ : int=400 , a_ : List[Any]=None , a_ : int=True , a_ : int=True , a_ : Dict=None , )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else {'height': 20, 'width': 20}
SCREAMING_SNAKE_CASE__ : str = parent
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : Any = num_channels
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_size
SCREAMING_SNAKE_CASE__ : List[str] = min_resolution
SCREAMING_SNAKE_CASE__ : Dict = max_resolution
SCREAMING_SNAKE_CASE__ : List[Any] = size
SCREAMING_SNAKE_CASE__ : Tuple = do_normalize
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_convert_rgb
SCREAMING_SNAKE_CASE__ : List[str] = [512, 1024, 2048, 4096]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = patch_size if patch_size is not None else {'height': 16, 'width': 16}
def __lowercase( self : Optional[Any] )-> str:
"""simple docstring"""
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __lowercase( self : Dict )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(a_ , stream=a_ ).raw ).convert('RGB' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = PixaStructImageProcessor if is_vision_available() else None
def __lowercase( self : List[str] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = PixaStructImageProcessingTester(self )
@property
def __lowercase( self : Dict )-> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , 'do_normalize' ) )
self.assertTrue(hasattr(a_ , 'do_convert_rgb' ) )
def __lowercase( self : List[Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.image_processor_tester.prepare_dummy_image()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE__ : List[Any] = 2048
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(a_ , return_tensors='pt' , max_patches=a_ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : str = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processor(
a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowercase( self : Any )-> Any:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : str = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
SCREAMING_SNAKE_CASE__ : int = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(a_ ):
SCREAMING_SNAKE_CASE__ : Dict = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
SCREAMING_SNAKE_CASE__ : List[Any] = 'Hello'
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ , header_text=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processor(
a_ , return_tensors='pt' , max_patches=a_ , header_text=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowercase( self : List[Any] )-> Dict:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
SCREAMING_SNAKE_CASE__ : str = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE__ : str = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processor(
a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Any = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(
a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = PixaStructImageProcessor if is_vision_available() else None
def __lowercase( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = PixaStructImageProcessingTester(self , num_channels=4 )
SCREAMING_SNAKE_CASE__ : Dict = 3
@property
def __lowercase( self : Any )-> Any:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase( self : Dict )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , 'do_normalize' ) )
self.assertTrue(hasattr(a_ , 'do_convert_rgb' ) )
def __lowercase( self : str )-> Union[str, Any]:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Dict = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processor(
a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 636 | 1 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _a ( lowercase__ : int ):
'''simple docstring'''
if is_torch_version('<' , '2.0.0' ) or not hasattr(lowercase__ , '_dynamo' ):
return False
return isinstance(lowercase__ , torch._dynamo.eval_frame.OptimizedModule )
def _a ( lowercase__ : Optional[Any] , lowercase__ : bool = True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
SCREAMING_SNAKE_CASE__ : Dict = is_compiled_module(lowercase__ )
if is_compiled:
SCREAMING_SNAKE_CASE__ : Tuple = model
SCREAMING_SNAKE_CASE__ : int = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE__ : Any = model.module
if not keep_fpaa_wrapper:
SCREAMING_SNAKE_CASE__ : List[Any] = getattr(lowercase__ , 'forward' )
SCREAMING_SNAKE_CASE__ : str = model.__dict__.pop('_original_forward' , lowercase__ )
if original_forward is not None:
while hasattr(lowercase__ , '__wrapped__' ):
SCREAMING_SNAKE_CASE__ : Dict = forward.__wrapped__
if forward == original_forward:
break
SCREAMING_SNAKE_CASE__ : Dict = forward
if getattr(lowercase__ , '_converted_to_transformer_engine' , lowercase__ ):
convert_model(lowercase__ , to_transformer_engine=lowercase__ )
if is_compiled:
SCREAMING_SNAKE_CASE__ : List[Any] = model
SCREAMING_SNAKE_CASE__ : Optional[Any] = compiled_model
return model
def _a ( ):
'''simple docstring'''
PartialState().wait_for_everyone()
def _a ( lowercase__ : str , lowercase__ : Optional[Any] ):
'''simple docstring'''
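    # On TPU, save through xm.save; otherwise only the local main process writes the file.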
if PartialState().distributed_type == DistributedType.TPU:
xm.save(lowercase__ , lowercase__ )
elif PartialState().local_process_index == 0:
torch.save(lowercase__ , lowercase__ )
@contextmanager
def _a ( **lowercase__ : str ):
'''simple docstring'''
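    # Intended as a context manager that sets the given keys as upper-cased environment variables for the duration of the block and removes them afterwards.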
for key, value in kwargs.items():
SCREAMING_SNAKE_CASE__ : int = str(lowercase__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _a ( lowercase__ : Optional[Any] ):
'''simple docstring'''
if not hasattr(lowercase__ , '__qualname__' ) and not hasattr(lowercase__ , '__name__' ):
SCREAMING_SNAKE_CASE__ : Any = getattr(lowercase__ , '__class__' , lowercase__ )
if hasattr(lowercase__ , '__qualname__' ):
return obj.__qualname__
if hasattr(lowercase__ , '__name__' ):
return obj.__name__
return str(lowercase__ )
def _a ( lowercase__ : List[str] , lowercase__ : List[Any] ):
'''simple docstring'''
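    # Recursively merge `source` into `destination`, descending into nested dictionaries.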
for key, value in source.items():
if isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE__ : List[str] = destination.setdefault(lowercase__ , {} )
merge_dicts(lowercase__ , lowercase__ )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = value
return destination
def _a ( lowercase__ : int = None ):
'''simple docstring'''
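    # Report whether something is already listening on the given localhost port (29500, the torch.distributed default, if unspecified).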
if port is None:
SCREAMING_SNAKE_CASE__ : int = 2_95_00
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
| 636 | import heapq as hq
import math
from collections.abc import Iterator
class snake_case :
def __init__( self : str , a_ : str )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = str(id_ )
SCREAMING_SNAKE_CASE__ : Any = None
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {} # {vertex:distance}
def __lt__( self : int , a_ : Tuple )-> Union[str, Any]:
"""simple docstring"""
return self.key < other.key
def __repr__( self : Any )-> Dict:
"""simple docstring"""
return self.id
def __lowercase( self : Optional[Any] , a_ : int )-> List[str]:
"""simple docstring"""
self.neighbors.append(a_ )
def __lowercase( self : int , a_ : int , a_ : Optional[Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = weight
def _a ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : Dict ):
'''simple docstring'''
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , lowercase__ )
graph[b - 1].add_edge(graph[a - 1] , lowercase__ )
def _a ( lowercase__ : list , lowercase__ : Vertex ):
'''simple docstring'''
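    # Prim's minimum spanning tree, O(V^2) variant: repeatedly pick the unvisited vertex with the smallest key and relax its neighbours.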
SCREAMING_SNAKE_CASE__ : Any = []
for u in graph:
SCREAMING_SNAKE_CASE__ : Dict = math.inf
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : List[str] = 0
SCREAMING_SNAKE_CASE__ : int = graph[:]
while q:
SCREAMING_SNAKE_CASE__ : Optional[Any] = min(lowercase__ )
q.remove(lowercase__ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
SCREAMING_SNAKE_CASE__ : int = u
SCREAMING_SNAKE_CASE__ : Any = u.edges[v.id]
for i in range(1 , len(lowercase__ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def _a ( lowercase__ : list , lowercase__ : Vertex ):
'''simple docstring'''
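    # Prim's minimum spanning tree with a binary heap; heapify is re-run after key updates because keys are mutated in place.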
for u in graph:
SCREAMING_SNAKE_CASE__ : List[str] = math.inf
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
SCREAMING_SNAKE_CASE__ : Tuple = list(lowercase__ )
hq.heapify(lowercase__ )
while h:
SCREAMING_SNAKE_CASE__ : Optional[int] = hq.heappop(lowercase__ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
SCREAMING_SNAKE_CASE__ : List[str] = u
SCREAMING_SNAKE_CASE__ : Dict = u.edges[v.id]
hq.heapify(lowercase__ )
for i in range(1 , len(lowercase__ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def _a ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 636 | 1 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
SCREAMING_SNAKE_CASE__ : Optional[int] = "src/diffusers"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "."
# This is to make sure the diffusers module imported is the one in the repo.
SCREAMING_SNAKE_CASE__ : Optional[Any] = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = spec.loader.load_module()
def _a ( lowercase__ : List[Any] , lowercase__ : Optional[Any] ):
'''simple docstring'''
return line.startswith(lowercase__ ) or len(lowercase__ ) <= 1 or re.search(r'^\s*\)(\s*->.*:|:)\s*$' , lowercase__ ) is not None
def _a ( lowercase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = object_name.split('.' )
SCREAMING_SNAKE_CASE__ : Tuple = 0
# First let's find the module where our object lives.
SCREAMING_SNAKE_CASE__ : str = parts[i]
while i < len(lowercase__ ) and not os.path.isfile(os.path.join(lowercase__ , f'''{module}.py''' ) ):
i += 1
if i < len(lowercase__ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(lowercase__ , parts[i] )
if i >= len(lowercase__ ):
raise ValueError(f'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' )
with open(os.path.join(lowercase__ , f'''{module}.py''' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
SCREAMING_SNAKE_CASE__ : Tuple = f.readlines()
# Now let's find the class / func in the code!
SCREAMING_SNAKE_CASE__ : List[Any] = ''
SCREAMING_SNAKE_CASE__ : str = 0
for name in parts[i + 1 :]:
while (
line_index < len(lowercase__ ) and re.search(rf'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(lowercase__ ):
raise ValueError(f''' {object_name} does not match any function or class in {module}.''' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
SCREAMING_SNAKE_CASE__ : Tuple = line_index
while line_index < len(lowercase__ ) and _should_continue(lines[line_index] , lowercase__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
SCREAMING_SNAKE_CASE__ : Optional[int] = lines[start_index:line_index]
return "".join(lowercase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
SCREAMING_SNAKE_CASE__ : Any = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
SCREAMING_SNAKE_CASE__ : Optional[int] = re.compile(r"<FILL\s+[^>]*>")
def _a ( lowercase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = code.split('\n' )
SCREAMING_SNAKE_CASE__ : Tuple = 0
while idx < len(lowercase__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(lowercase__ ):
return re.search(r'^(\s*)\S' , lines[idx] ).groups()[0]
return ""
def _a ( lowercase__ : str ):
'''simple docstring'''
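    # Format a snippet with black (119-char lines); indented snippets are wrapped in a dummy class so black accepts them, then unwrapped.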
SCREAMING_SNAKE_CASE__ : str = len(get_indent(lowercase__ ) ) > 0
if has_indent:
SCREAMING_SNAKE_CASE__ : int = f'''class Bla:\n{code}'''
SCREAMING_SNAKE_CASE__ : int = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 , preview=lowercase__ )
SCREAMING_SNAKE_CASE__ : int = black.format_str(lowercase__ , mode=lowercase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = style_docstrings_in_code(lowercase__ )
return result[len('class Bla:\n' ) :] if has_indent else result
def _a ( lowercase__ : str , lowercase__ : Any=False ):
'''simple docstring'''
with open(lowercase__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
SCREAMING_SNAKE_CASE__ : Dict = f.readlines()
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : Tuple = 0
    # Not a for loop because `lines` is going to change (if `overwrite=True`).
while line_index < len(lowercase__ ):
SCREAMING_SNAKE_CASE__ : Any = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = search.groups()
SCREAMING_SNAKE_CASE__ : Any = find_code_in_diffusers(lowercase__ )
SCREAMING_SNAKE_CASE__ : List[str] = get_indent(lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = line_index + 1 if indent == theoretical_indent else line_index + 2
SCREAMING_SNAKE_CASE__ : Tuple = theoretical_indent
SCREAMING_SNAKE_CASE__ : Tuple = start_index
        # Loop to check the observed code, stop when indentation diminishes or when we see an `# End copy` comment.
SCREAMING_SNAKE_CASE__ : Optional[int] = True
while line_index < len(lowercase__ ) and should_continue:
line_index += 1
if line_index >= len(lowercase__ ):
break
SCREAMING_SNAKE_CASE__ : Optional[Any] = lines[line_index]
SCREAMING_SNAKE_CASE__ : Tuple = _should_continue(lowercase__ , lowercase__ ) and re.search(f'''^{indent}# End copy''' , lowercase__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
SCREAMING_SNAKE_CASE__ : List[Any] = lines[start_index:line_index]
SCREAMING_SNAKE_CASE__ : Optional[Any] = ''.join(lowercase__ )
# Remove any nested `Copied from` comments to avoid circular copies
SCREAMING_SNAKE_CASE__ : Tuple = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(lowercase__ ) is None]
SCREAMING_SNAKE_CASE__ : Optional[int] = '\n'.join(lowercase__ )
# Before comparing, use the `replace_pattern` on the original code.
if len(lowercase__ ) > 0:
SCREAMING_SNAKE_CASE__ : Optional[int] = replace_pattern.replace('with' , '' ).split(',' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [_re_replace_pattern.search(lowercase__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = pattern.groups()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.sub(lowercase__ , lowercase__ , lowercase__ )
if option.strip() == "all-casing":
SCREAMING_SNAKE_CASE__ : Optional[Any] = re.sub(obja.lower() , obja.lower() , lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = re.sub(obja.upper() , obja.upper() , lowercase__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
SCREAMING_SNAKE_CASE__ : str = blackify(lines[start_index - 1] + theoretical_code )
SCREAMING_SNAKE_CASE__ : int = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
SCREAMING_SNAKE_CASE__ : List[str] = lines[:start_index] + [theoretical_code] + lines[line_index:]
SCREAMING_SNAKE_CASE__ : str = start_index + 1
if overwrite and len(lowercase__ ) > 0:
# Warn the user a file has been modified.
print(f'''Detected changes, rewriting {filename}.''' )
with open(lowercase__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lowercase__ )
return diffs
def _a ( lowercase__ : bool = False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = glob.glob(os.path.join(lowercase__ , '**/*.py' ) , recursive=lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = []
for filename in all_files:
SCREAMING_SNAKE_CASE__ : List[str] = is_copy_consistent(lowercase__ , lowercase__ )
diffs += [f'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
if not overwrite and len(lowercase__ ) > 0:
SCREAMING_SNAKE_CASE__ : List[str] = '\n'.join(lowercase__ )
raise Exception(
'Found the following copy inconsistencies:\n'
+ diff
+ '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
SCREAMING_SNAKE_CASE__ : Dict = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 636 | def _a ( lowercase__ : int , lowercase__ : int ):
'''simple docstring'''
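    # NAND returns 0 only when both inputs are 1; any 0 in the pair yields 1.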
return int((input_a, input_a).count(0 ) != 0 )
def _a ( ):
'''simple docstring'''
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 636 | 1 |
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = ''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
SCREAMING_SNAKE_CASE__ : Any = remove_duplicates(key.upper() )
SCREAMING_SNAKE_CASE__ : Optional[int] = len(lowercase__ )
# First fill cipher with key characters
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {alphabet[i]: char for i, char in enumerate(lowercase__ )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(lowercase__ ) , 26 ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
SCREAMING_SNAKE_CASE__ : Any = alphabet[i - offset]
SCREAMING_SNAKE_CASE__ : str = char
return cipher_alphabet
def _a ( lowercase__ : str , lowercase__ : dict[str, str] ):
'''simple docstring'''
return "".join(cipher_map.get(lowercase__ , lowercase__ ) for ch in message.upper() )
def _a ( lowercase__ : str , lowercase__ : dict[str, str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(lowercase__ , lowercase__ ) for ch in message.upper() )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = input('Enter message to encode or decode: ' ).strip()
SCREAMING_SNAKE_CASE__ : Tuple = input('Enter keyword: ' ).strip()
SCREAMING_SNAKE_CASE__ : List[Any] = input('Encipher or decipher? E/D:' ).strip()[0].lower()
try:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'e': encipher, 'd': decipher}[option]
except KeyError:
raise KeyError('invalid input option' )
SCREAMING_SNAKE_CASE__ : List[Any] = create_cipher_map(lowercase__ )
print(func(lowercase__ , lowercase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 636 | from math import factorial, radians
def _a ( lowercase__ : float , lowercase__ : int = 18 , lowercase__ : int = 10 ):
'''simple docstring'''
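    # Maclaurin series for sin(x): x - x^3/3! + x^5/5! - ..., evaluated term by term after reducing the angle modulo 360 degrees.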
SCREAMING_SNAKE_CASE__ : Optional[Any] = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
SCREAMING_SNAKE_CASE__ : int = radians(lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = angle_in_radians
SCREAMING_SNAKE_CASE__ : Optional[int] = 3
SCREAMING_SNAKE_CASE__ : Optional[int] = -1
for _ in range(lowercase__ ):
result += (b * (angle_in_radians**a)) / factorial(lowercase__ )
SCREAMING_SNAKE_CASE__ : Any = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowercase__ , lowercase__ )
if __name__ == "__main__":
__import__("doctest").testmod()
| 636 | 1 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case :
def __init__( self : List[Any] , a_ : List[Any] , a_ : Tuple=13 , a_ : int=32 , a_ : Optional[Any]=2 , a_ : List[Any]=3 , a_ : str=16 , a_ : List[Any]=[1, 2, 1] , a_ : List[str]=[2, 2, 4] , a_ : str=2 , a_ : str=2.0 , a_ : Any=True , a_ : Optional[int]=0.0 , a_ : Dict=0.0 , a_ : int=0.1 , a_ : Optional[Any]="gelu" , a_ : Tuple=False , a_ : Any=True , a_ : int=0.02 , a_ : List[str]=1e-5 , a_ : List[str]=True , a_ : Optional[Any]=None , a_ : str=True , a_ : Optional[Any]=10 , a_ : int=8 , )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = parent
SCREAMING_SNAKE_CASE__ : Tuple = batch_size
SCREAMING_SNAKE_CASE__ : Any = image_size
SCREAMING_SNAKE_CASE__ : Any = patch_size
SCREAMING_SNAKE_CASE__ : Any = num_channels
SCREAMING_SNAKE_CASE__ : Any = embed_dim
SCREAMING_SNAKE_CASE__ : List[Any] = depths
SCREAMING_SNAKE_CASE__ : List[Any] = num_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = window_size
SCREAMING_SNAKE_CASE__ : str = mlp_ratio
SCREAMING_SNAKE_CASE__ : Tuple = qkv_bias
SCREAMING_SNAKE_CASE__ : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] = drop_path_rate
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_absolute_embeddings
SCREAMING_SNAKE_CASE__ : Optional[int] = patch_norm
SCREAMING_SNAKE_CASE__ : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : str = initializer_range
SCREAMING_SNAKE_CASE__ : Dict = is_training
SCREAMING_SNAKE_CASE__ : str = scope
SCREAMING_SNAKE_CASE__ : str = use_labels
SCREAMING_SNAKE_CASE__ : Tuple = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Any = encoder_stride
def __lowercase( self : Any )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Any = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def __lowercase( self : int )-> Union[str, Any]:
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowercase( self : List[str] , a_ : Optional[int] , a_ : List[Any] , a_ : List[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = SwinvaModel(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __lowercase( self : Optional[Any] , a_ : Optional[int] , a_ : Union[str, Any] , a_ : str )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = SwinvaForMaskedImageModeling(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple = model(a_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ : Dict = 1
SCREAMING_SNAKE_CASE__ : Dict = SwinvaForMaskedImageModeling(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowercase( self : int , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : List[str] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE__ : int = SwinvaForImageClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : int = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowercase( self : List[Any] )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = config_and_inputs
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowercase_ = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowercase_ = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def __lowercase( self : Optional[Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = SwinvaModelTester(self )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ConfigTester(self , config_class=a_ , embed_dim=37 )
def __lowercase( self : Optional[Any] )-> str:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def __lowercase( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def __lowercase( self : str )-> List[str]:
"""simple docstring"""
pass
def __lowercase( self : Any )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Dict = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , nn.Linear ) )
def __lowercase( self : Tuple )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] = model_class(a_ )
SCREAMING_SNAKE_CASE__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : str = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a_ )
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : str = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str = True
SCREAMING_SNAKE_CASE__ : Dict = False
SCREAMING_SNAKE_CASE__ : int = True
SCREAMING_SNAKE_CASE__ : Any = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(**self._prepare_for_class(a_ , a_ ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = outputs.attentions
SCREAMING_SNAKE_CASE__ : Optional[int] = len(self.model_tester.depths )
self.assertEqual(len(a_ ) , a_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE__ : str = True
SCREAMING_SNAKE_CASE__ : Tuple = config.window_size**2
SCREAMING_SNAKE_CASE__ : int = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[str] = model(**self._prepare_for_class(a_ , a_ ) )
SCREAMING_SNAKE_CASE__ : Tuple = outputs.attentions
self.assertEqual(len(a_ ) , a_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
SCREAMING_SNAKE_CASE__ : str = len(a_ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
SCREAMING_SNAKE_CASE__ : str = True
SCREAMING_SNAKE_CASE__ : Any = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : int = model(**self._prepare_for_class(a_ , a_ ) )
if hasattr(self.model_tester , 'num_hidden_states_types' ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
SCREAMING_SNAKE_CASE__ : Dict = 2
self.assertEqual(out_len + added_hidden_states , len(a_ ) )
SCREAMING_SNAKE_CASE__ : int = outputs.attentions
self.assertEqual(len(a_ ) , a_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def __lowercase( self : str , a_ : Tuple , a_ : List[str] , a_ : Dict , a_ : Optional[Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(**self._prepare_for_class(a_ , a_ ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = outputs.hidden_states
SCREAMING_SNAKE_CASE__ : List[Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a_ ) , a_ )
# Swinv2 has a different seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE__ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = outputs.reshaped_hidden_states
self.assertEqual(len(a_ ) , a_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = reshaped_hidden_states[0].shape
SCREAMING_SNAKE_CASE__ : List[Any] = (
reshaped_hidden_states[0].view(a_ , a_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __lowercase( self : Tuple )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = True
self.check_hidden_states_output(a_ , a_ , a_ , a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : int = True
self.check_hidden_states_output(a_ , a_ , a_ , a_ )
def __lowercase( self : str )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Optional[Any] = 3
SCREAMING_SNAKE_CASE__ : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE__ : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE__ : int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE__ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Any = True
self.check_hidden_states_output(a_ , a_ , a_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : Optional[int] = True
self.check_hidden_states_output(a_ , a_ , a_ , (padded_height, padded_width) )
def __lowercase( self : Dict )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a_ )
def __lowercase( self : Dict )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def __lowercase( self : List[Any] )-> int:
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = SwinvaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def __lowercase( self : Dict )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Optional[int] = _config_zero_init(a_ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(config=a_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class snake_case ( unittest.TestCase ):
@cached_property
def __lowercase( self : Optional[int] )-> str:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def __lowercase( self : Optional[Any] )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
a_ )
SCREAMING_SNAKE_CASE__ : int = self.default_image_processor
SCREAMING_SNAKE_CASE__ : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(images=a_ , return_tensors='pt' ).to(a_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**a_ )
# verify the logits
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) )
| 636 | import math
def _a ( lowercase__ : int ):
'''simple docstring'''
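    # Trial division: 2 and 3 are prime, values below 2 and even numbers are not, then test odd divisors up to sqrt(number).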
assert isinstance(lowercase__ , lowercase__ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
SCREAMING_SNAKE_CASE__ : Tuple = range(3 , int(math.sqrt(lowercase__ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def _a ( lowercase__ : List[str] , lowercase__ : Any=1 , **lowercase__ : Any ):
'''simple docstring'''
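    # Starting from factor * value, step upward (or downward when desc=True is passed) until a prime is reached.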
SCREAMING_SNAKE_CASE__ : int = factor * value
SCREAMING_SNAKE_CASE__ : Dict = value
while not is_prime(lowercase__ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **lowercase__ )
return value
| 636 | 1 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
class snake_case ( UpperCamelCase_ ):
def __init__( self : Optional[int] , a_ : Union[List[ControlNetModel], Tuple[ControlNetModel]] )-> int:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : List[str] = nn.ModuleList(a_ )
def __lowercase( self : Tuple , a_ : torch.FloatTensor , a_ : Union[torch.Tensor, float, int] , a_ : torch.Tensor , a_ : List[torch.tensor] , a_ : List[float] , a_ : Optional[torch.Tensor] = None , a_ : Optional[torch.Tensor] = None , a_ : Optional[torch.Tensor] = None , a_ : Optional[Dict[str, Any]] = None , a_ : bool = False , a_ : bool = True , )-> Union[ControlNetOutput, Tuple]:
"""simple docstring"""
for i, (image, scale, controlnet) in enumerate(zip(a_ , a_ , self.nets ) ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = controlnet(
a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , )
# merge samples
if i == 0:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = down_samples, mid_sample
else:
SCREAMING_SNAKE_CASE__ : List[str] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(a_ , a_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __lowercase( self : Optional[Any] , a_ : Union[str, os.PathLike] , a_ : bool = True , a_ : Callable = None , a_ : bool = False , a_ : Optional[str] = None , )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
SCREAMING_SNAKE_CASE__ : int = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
a_ , is_main_process=a_ , save_function=a_ , safe_serialization=a_ , variant=a_ , )
idx += 1
SCREAMING_SNAKE_CASE__ : List[str] = model_path_to_save + F'''_{idx}'''
@classmethod
def __lowercase( cls : str , a_ : Optional[Union[str, os.PathLike]] , **a_ : Optional[int] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
SCREAMING_SNAKE_CASE__ : List[str] = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pretrained_model_path
while os.path.isdir(a_ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = ControlNetModel.from_pretrained(a_ , **a_ )
controlnets.append(a_ )
idx += 1
SCREAMING_SNAKE_CASE__ : Tuple = pretrained_model_path + F'''_{idx}'''
logger.info(F'''{len(a_ )} controlnets loaded from {pretrained_model_path}.''' )
if len(a_ ) == 0:
raise ValueError(
F'''No ControlNets found under {os.path.dirname(a_ )}. Expected at least {pretrained_model_path + '_0'}.''' )
return cls(a_ )
| 636 | import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class snake_case :
def __init__( self : str , a_ : List[str] , a_ : Tuple=13 , a_ : Dict=30 , a_ : Optional[int]=2 , a_ : Tuple=3 , a_ : Dict=True , a_ : int=True , a_ : Optional[Any]=32 , a_ : List[str]=5 , a_ : Any=4 , a_ : Dict=37 , a_ : Dict="gelu" , a_ : int=0.1 , a_ : Optional[Any]=0.1 , a_ : Any=10 , a_ : List[str]=0.02 , a_ : Any=3 , a_ : List[str]=None , a_ : Optional[int]=2 , )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = parent
SCREAMING_SNAKE_CASE__ : int = batch_size
SCREAMING_SNAKE_CASE__ : int = image_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = num_channels
SCREAMING_SNAKE_CASE__ : int = is_training
SCREAMING_SNAKE_CASE__ : List[Any] = use_labels
SCREAMING_SNAKE_CASE__ : str = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : str = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = scope
SCREAMING_SNAKE_CASE__ : str = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
SCREAMING_SNAKE_CASE__ : Optional[int] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_patches + 2
def __lowercase( self : Optional[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __lowercase( self : Optional[Any] )-> Tuple:
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowercase( self : List[str] , a_ : List[str] , a_ : Optional[Any] , a_ : str )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = DeiTModel(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase( self : List[Any] , a_ : List[str] , a_ : List[str] , a_ : List[Any] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = DeiTForMaskedImageModeling(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ : Optional[int] = 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DeiTForMaskedImageModeling(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : int = model(a_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowercase( self : List[str] , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Tuple )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Tuple = DeiTForImageClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ : Any = 1
SCREAMING_SNAKE_CASE__ : int = DeiTForImageClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowercase( self : int )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = config_and_inputs
SCREAMING_SNAKE_CASE__ : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowercase_ = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowercase_ = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
def __lowercase( self : List[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = DeiTModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def __lowercase( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def __lowercase( self : List[Any] )-> Dict:
"""simple docstring"""
pass
def __lowercase( self : str )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , nn.Linear ) )
def __lowercase( self : str )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[str] = model_class(a_ )
SCREAMING_SNAKE_CASE__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : int = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a_ )
def __lowercase( self : List[Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def __lowercase( self : List[Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a_ )
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
def __lowercase( self : str , a_ : str , a_ : Tuple , a_ : Union[str, Any]=False )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __lowercase( self : Optional[Any] )-> Any:
"""simple docstring"""
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(a_ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE__ : Tuple = model_class(a_ )
model.to(a_ )
model.train()
SCREAMING_SNAKE_CASE__ : List[Any] = self._prepare_for_class(a_ , a_ , return_labels=a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**a_ ).loss
loss.backward()
def __lowercase( self : Optional[int] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : Tuple = True
for model_class in self.all_model_classes:
if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ )
model.gradient_checkpointing_enable()
model.to(a_ )
model.train()
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(**a_ ).loss
loss.backward()
def __lowercase( self : Optional[Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[str] = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(a_ ),
*get_values(a_ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ):
SCREAMING_SNAKE_CASE__ : int = problem_type['title']
SCREAMING_SNAKE_CASE__ : Tuple = problem_type['num_labels']
SCREAMING_SNAKE_CASE__ : str = model_class(a_ )
model.to(a_ )
model.train()
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
SCREAMING_SNAKE_CASE__ : Any = inputs['labels'].to(problem_type['dtype'] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong with the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=a_ ) as warning_list:
SCREAMING_SNAKE_CASE__ : str = model(**a_ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def __lowercase( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = DeiTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def __lowercase( self : int )-> Dict:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def __lowercase( self : Any )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to(
a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(images=a_ , return_tensors='pt' ).to(a_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(**a_ )
# verify the logits
SCREAMING_SNAKE_CASE__ : int = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __lowercase( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = DeiTModel.from_pretrained(
'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto' )
SCREAMING_SNAKE_CASE__ : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(images=a_ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : str = inputs.pixel_values.to(a_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ )
| 636 | 1 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger("transformers.models.speecht5")
SCREAMING_SNAKE_CASE__ : List[Any] = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
SCREAMING_SNAKE_CASE__ : List[str] = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
SCREAMING_SNAKE_CASE__ : Any = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
SCREAMING_SNAKE_CASE__ : int = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
SCREAMING_SNAKE_CASE__ : int = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
SCREAMING_SNAKE_CASE__ : Dict = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
SCREAMING_SNAKE_CASE__ : int = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
SCREAMING_SNAKE_CASE__ : Any = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : List[str] = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
SCREAMING_SNAKE_CASE__ : Any = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
SCREAMING_SNAKE_CASE__ : int = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def _a ( lowercase__ : Union[str, Any] , lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : List[Any] , lowercase__ : Union[str, Any] ):
'''simple docstring'''
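# Walk the dotted key down to the target submodule, verify the checkpoint tensor has the
# expected shape, then copy it into the matching parameter/buffer of the HF model.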
for attribute in key.split('.' ):
SCREAMING_SNAKE_CASE__ : Tuple = getattr(lowercase__ , lowercase__ )
if weight_type is not None:
SCREAMING_SNAKE_CASE__ : int = getattr(lowercase__ , lowercase__ ).shape
else:
SCREAMING_SNAKE_CASE__ : List[str] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
SCREAMING_SNAKE_CASE__ : List[Any] = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE__ : int = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE__ : str = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE__ : List[Any] = value
elif weight_type == "running_mean":
SCREAMING_SNAKE_CASE__ : Optional[int] = value
elif weight_type == "running_var":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = value
elif weight_type == "num_batches_tracked":
SCREAMING_SNAKE_CASE__ : Optional[int] = value
else:
SCREAMING_SNAKE_CASE__ : Tuple = value
logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def _a ( lowercase__ : List[Any] , lowercase__ : Tuple ):
'''simple docstring'''
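# Keys ending in ".*" match any name starting with the prefix; keys containing ".*." match
# when both the prefix and the suffix occur in the name; otherwise a plain substring match is used.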
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def _a ( lowercase__ : Optional[int] , lowercase__ : Union[str, Any] , lowercase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = []
if task == "s2t":
SCREAMING_SNAKE_CASE__ : str = hf_model.speechta.encoder.prenet.feature_encoder
SCREAMING_SNAKE_CASE__ : Tuple = MAPPING_S2T
SCREAMING_SNAKE_CASE__ : List[Any] = IGNORE_KEYS_S2T
elif task == "t2s":
SCREAMING_SNAKE_CASE__ : Dict = None
SCREAMING_SNAKE_CASE__ : Optional[Any] = MAPPING_T2S
SCREAMING_SNAKE_CASE__ : int = IGNORE_KEYS_T2S
elif task == "s2s":
SCREAMING_SNAKE_CASE__ : Any = hf_model.speechta.encoder.prenet.feature_encoder
SCREAMING_SNAKE_CASE__ : str = MAPPING_S2S
SCREAMING_SNAKE_CASE__ : List[Any] = IGNORE_KEYS_S2S
else:
raise ValueError(f'''Unsupported task: {task}''' )
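# Copy every fairseq tensor either into the feature encoder (conv layers) or into the HF
# module selected by the task-specific mapping; anything left unmatched is reported as unused.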
for name, value in fairseq_dict.items():
if should_ignore(lowercase__ , lowercase__ ):
logger.info(f'''{name} was ignored''' )
continue
SCREAMING_SNAKE_CASE__ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , hf_model.config.feat_extract_norm == 'group' , )
SCREAMING_SNAKE_CASE__ : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = key.split('.*.' )
if prefix in name and suffix in name:
SCREAMING_SNAKE_CASE__ : Any = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
SCREAMING_SNAKE_CASE__ : Dict = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE__ : Dict = name.split(lowercase__ )[0].split('.' )[-2]
SCREAMING_SNAKE_CASE__ : int = mapped_key.replace('*' , lowercase__ )
if "weight_g" in name:
SCREAMING_SNAKE_CASE__ : List[Any] = 'weight_g'
elif "weight_v" in name:
SCREAMING_SNAKE_CASE__ : int = 'weight_v'
elif "bias" in name:
SCREAMING_SNAKE_CASE__ : str = 'bias'
elif "weight" in name:
SCREAMING_SNAKE_CASE__ : List[str] = 'weight'
elif "running_mean" in name:
SCREAMING_SNAKE_CASE__ : Dict = 'running_mean'
elif "running_var" in name:
SCREAMING_SNAKE_CASE__ : List[Any] = 'running_var'
elif "num_batches_tracked" in name:
SCREAMING_SNAKE_CASE__ : Any = 'num_batches_tracked'
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
set_recursively(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
continue
if not is_used:
unused_weights.append(lowercase__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _a ( lowercase__ : Union[str, Any] , lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : Any , lowercase__ : Any ):
'''simple docstring'''
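# type_id 0 entries carry the convolution weight/bias of a feature-encoder layer,
# type_id 2 entries carry the corresponding (group/layer) norm weight/bias.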
SCREAMING_SNAKE_CASE__ : str = full_name.split('conv_layers.' )[-1]
SCREAMING_SNAKE_CASE__ : Optional[int] = name.split('.' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(items[0] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
SCREAMING_SNAKE_CASE__ : Any = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
SCREAMING_SNAKE_CASE__ : int = value
logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
SCREAMING_SNAKE_CASE__ : Dict = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowercase__ )
@torch.no_grad()
def _a ( lowercase__ : List[str] , lowercase__ : Optional[int] , lowercase__ : Optional[int] , lowercase__ : List[Any]=None , lowercase__ : Optional[Any]=None , lowercase__ : Any=None , ):
'''simple docstring'''
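# Build the task-specific SpeechT5 config and model, optionally set up the tokenizer/processor,
# port the fairseq weights, save everything to disk and, if requested, push to the hub.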
if config_path is not None:
SCREAMING_SNAKE_CASE__ : int = SpeechTaConfig.from_pretrained(lowercase__ )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = SpeechTaConfig()
if task == "s2t":
SCREAMING_SNAKE_CASE__ : str = config.max_text_positions
SCREAMING_SNAKE_CASE__ : Optional[int] = SpeechTaForSpeechToText(lowercase__ )
elif task == "t2s":
SCREAMING_SNAKE_CASE__ : Tuple = 18_76
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 6_00
SCREAMING_SNAKE_CASE__ : int = config.max_speech_positions
SCREAMING_SNAKE_CASE__ : List[str] = SpeechTaForTextToSpeech(lowercase__ )
elif task == "s2s":
SCREAMING_SNAKE_CASE__ : Any = 18_76
SCREAMING_SNAKE_CASE__ : Dict = config.max_speech_positions
SCREAMING_SNAKE_CASE__ : List[Any] = SpeechTaForSpeechToSpeech(lowercase__ )
else:
raise ValueError(f'''Unknown task name: {task}''' )
if vocab_path:
SCREAMING_SNAKE_CASE__ : Dict = SpeechTaTokenizer(lowercase__ , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AddedToken('<mask>' , lstrip=lowercase__ , rstrip=lowercase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
SCREAMING_SNAKE_CASE__ : List[Any] = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Tuple = SpeechTaProcessor(tokenizer=lowercase__ , feature_extractor=lowercase__ )
processor.save_pretrained(lowercase__ )
SCREAMING_SNAKE_CASE__ : Any = torch.load(lowercase__ )
recursively_load_weights(fairseq_checkpoint['model'] , lowercase__ , lowercase__ )
model.save_pretrained(lowercase__ )
if repo_id:
print('Pushing to the hub...' )
processor.push_to_hub(lowercase__ )
model.push_to_hub(lowercase__ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 636 | import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
def __init__( self : List[Any] , a_ : Dict , a_ : Any=13 , a_ : Any=7 , a_ : Tuple=True , a_ : Tuple=True , a_ : Optional[int]=False , a_ : Dict=True , a_ : Optional[Any]=99 , a_ : Any=32 , a_ : Dict=5 , a_ : Tuple=4 , a_ : List[str]=37 , a_ : Union[str, Any]="gelu" , a_ : Dict=0.1 , a_ : Tuple=0.1 , a_ : List[str]=512 , a_ : List[str]=16 , a_ : List[str]=2 , a_ : Optional[int]=0.02 , a_ : List[str]=3 , a_ : Union[str, Any]=4 , a_ : Optional[Any]=None , )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = parent
SCREAMING_SNAKE_CASE__ : Dict = batch_size
SCREAMING_SNAKE_CASE__ : Dict = seq_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_input_mask
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE__ : int = use_labels
SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Dict = intermediate_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Tuple = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = num_labels
SCREAMING_SNAKE_CASE__ : Dict = num_choices
SCREAMING_SNAKE_CASE__ : str = scope
def __lowercase( self : Tuple )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Tuple = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : str = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase( self : Dict )-> Tuple:
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a_ , initializer_range=self.initializer_range , )
def __lowercase( self : Any , a_ : str , a_ : Tuple , a_ : Dict , a_ : Optional[int] , a_ : List[Any] , a_ : Union[str, Any] , a_ : Tuple )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BioGptModel(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , attention_mask=a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase( self : List[Any] , a_ : Union[str, Any] , a_ : Optional[int] , a_ : Tuple , a_ : Optional[Any] , a_ : int , a_ : Optional[int] , a_ : int , a_ : str , a_ : Optional[Any] , )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = BioGptForCausalLM(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase( self : Tuple , a_ : Optional[int] , a_ : Union[str, Any] , a_ : Any , a_ : Any , a_ : Optional[int] , *a_ : Tuple )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = BioGptModel(config=a_ )
model.to(a_ )
model.eval()
# create attention mask
SCREAMING_SNAKE_CASE__ : Any = torch.ones(input_ids.shape , dtype=torch.long , device=a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.seq_length // 2
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
# first forward pass
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , attention_mask=a_ ).to_tuple()
# create hypothetical next token and extend to next_input_ids
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
SCREAMING_SNAKE_CASE__ : str = ids_tensor((1,) , a_ ).item() + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = random_other_next_tokens
# append to next input_ids and attn_mask
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Dict = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=a_ )] , dim=1 , )
# get two different outputs
SCREAMING_SNAKE_CASE__ : str = model(a_ , attention_mask=a_ )['last_hidden_state']
SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , past_key_values=a_ , attention_mask=a_ )['last_hidden_state']
# select random slice
SCREAMING_SNAKE_CASE__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : List[str] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1e-3 ) )
def __lowercase( self : str , a_ : List[Any] , a_ : str , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Optional[Any] , *a_ : List[str] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = BioGptModel(config=a_ ).to(a_ ).eval()
SCREAMING_SNAKE_CASE__ : Dict = torch.ones(input_ids.shape , dtype=torch.long , device=a_ )
# first forward pass
SCREAMING_SNAKE_CASE__ : Any = model(a_ , attention_mask=a_ , use_cache=a_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and attention mask
SCREAMING_SNAKE_CASE__ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
SCREAMING_SNAKE_CASE__ : int = model(a_ , attention_mask=a_ )['last_hidden_state']
SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , attention_mask=a_ , past_key_values=a_ )[
'last_hidden_state'
]
# select random slice
SCREAMING_SNAKE_CASE__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1e-3 ) )
def __lowercase( self : Any , a_ : List[str] , a_ : Optional[int] , a_ : Any , a_ : Tuple , a_ : Any , *a_ : List[Any] , a_ : Union[str, Any]=False )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = BioGptForCausalLM(a_ )
model.to(a_ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
SCREAMING_SNAKE_CASE__ : Tuple = model(a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def __lowercase( self : Union[str, Any] , a_ : List[str] , *a_ : Optional[int] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = BioGptModel(a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def __lowercase( self : Dict , a_ : Tuple , a_ : Tuple , a_ : List[str] , a_ : Any , a_ : str , *a_ : str )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.num_labels
SCREAMING_SNAKE_CASE__ : str = BioGptForTokenClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ , attention_mask=a_ , token_type_ids=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowercase( self : Any )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) ,
) : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowercase_ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
lowercase_ = (BioGptForCausalLM,) if is_torch_available() else ()
lowercase_ = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ = False
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = BioGptModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 )
def __lowercase( self : Tuple )-> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowercase( self : Optional[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def __lowercase( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE__ : List[str] = type
self.model_tester.create_and_check_model(*a_ )
def __lowercase( self : int )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*a_ )
def __lowercase( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*a_ , gradient_checkpointing=a_ )
def __lowercase( self : Union[str, Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*a_ )
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*a_ )
def __lowercase( self : str )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*a_ )
@slow
def __lowercase( self : List[str] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(a_ )
SCREAMING_SNAKE_CASE__ : Dict = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
SCREAMING_SNAKE_CASE__ : List[str] = 'left'
# Define PAD Token = EOS Token
SCREAMING_SNAKE_CASE__ : Any = tokenizer.eos_token
SCREAMING_SNAKE_CASE__ : Tuple = model.config.eos_token_id
# use different length sentences to test batching
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
'Hello, my dog is a little',
'Today, I',
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(a_ , return_tensors='pt' , padding=a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = inputs['input_ids'].to(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = model.generate(
input_ids=a_ , attention_mask=inputs['attention_mask'].to(a_ ) , )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(a_ )
SCREAMING_SNAKE_CASE__ : Dict = model.generate(input_ids=a_ )
SCREAMING_SNAKE_CASE__ : Tuple = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
SCREAMING_SNAKE_CASE__ : Dict = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.generate(input_ids=a_ , max_length=model.config.max_length - num_paddings )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.decode(output_non_padded[0] , skip_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = [
'Hello, my dog is a little bit bigger than a little bit.',
'Today, I have a good idea of how to use the information',
]
self.assertListEqual(a_ , a_ )
self.assertListEqual(a_ , [non_padded_sentence, padded_sentence] )
@slow
def __lowercase( self : Any )-> List[Any]:
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : List[Any] = BioGptModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def __lowercase( self : Optional[int] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[Any] = 3
SCREAMING_SNAKE_CASE__ : List[Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_ids.ne(1 ).to(a_ )
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : int = BioGptForSequenceClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : str = 3
SCREAMING_SNAKE_CASE__ : Any = 'multi_label_classification'
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE__ : Any = input_ids.ne(1 ).to(a_ )
SCREAMING_SNAKE_CASE__ : str = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE__ : Dict = BioGptForSequenceClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = model(a_ , attention_mask=a_ , labels=a_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def __lowercase( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([[2, 4805, 9, 656, 21]] )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ )[0]
SCREAMING_SNAKE_CASE__ : List[str] = 4_2384
SCREAMING_SNAKE_CASE__ : Dict = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , a_ )
SCREAMING_SNAKE_CASE__ : int = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=1e-4 ) )
@slow
def __lowercase( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
SCREAMING_SNAKE_CASE__ : Dict = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(a_ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer('COVID-19 is' , return_tensors='pt' ).to(a_ )
SCREAMING_SNAKE_CASE__ : int = model.generate(
**a_ , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=a_ , )
SCREAMING_SNAKE_CASE__ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
self.assertEqual(a_ , a_ )
| 636 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE__ : List[str] = {
"configuration_mobilenet_v2": [
"MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileNetV2Config",
"MobileNetV2OnnxConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = ["MobileNetV2FeatureExtractor"]
SCREAMING_SNAKE_CASE__ : Any = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = [
"MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileNetV2ForImageClassification",
"MobileNetV2ForSemanticSegmentation",
"MobileNetV2Model",
"MobileNetV2PreTrainedModel",
"load_tf_weights_in_mobilenet_v2",
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 636 | import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ : Optional[Any] = random.Random()
def _a ( lowercase__ : List[str] , lowercase__ : List[Any]=1.0 , lowercase__ : Optional[int]=None , lowercase__ : List[str]=None ):
'''simple docstring'''
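# Build a nested list of random floats with the requested (batch, length) shape, scaled by
# `scale` and drawn from the shared module-level RNG unless an explicit one is provided.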
if rng is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = global_rng
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class snake_case ( unittest.TestCase ):
def __init__( self : List[Any] , a_ : Optional[Any] , a_ : Union[str, Any]=7 , a_ : Any=400 , a_ : List[Any]=2000 , a_ : Tuple=1 , a_ : Optional[int]=0.0 , a_ : Optional[Any]=1_6000 , a_ : str=True , a_ : Union[str, Any]=80 , a_ : Dict=16 , a_ : Tuple=64 , a_ : Any="hann_window" , a_ : Union[str, Any]=80 , a_ : List[Any]=7600 , a_ : Optional[Any]=1e-1_0 , a_ : Dict=True , )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = parent
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size
SCREAMING_SNAKE_CASE__ : str = min_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = max_seq_length
SCREAMING_SNAKE_CASE__ : str = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE__ : int = feature_size
SCREAMING_SNAKE_CASE__ : str = padding_value
SCREAMING_SNAKE_CASE__ : Any = sampling_rate
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : int = num_mel_bins
SCREAMING_SNAKE_CASE__ : int = hop_length
SCREAMING_SNAKE_CASE__ : str = win_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = win_function
SCREAMING_SNAKE_CASE__ : List[str] = fmin
SCREAMING_SNAKE_CASE__ : Dict = fmax
SCREAMING_SNAKE_CASE__ : int = mel_floor
SCREAMING_SNAKE_CASE__ : Tuple = return_attention_mask
def __lowercase( self : Dict )-> Dict:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def __lowercase( self : List[Any] , a_ : str=False , a_ : List[Any]=False )-> Optional[Any]:
"""simple docstring"""
def _flatten(a_ : int ):
return list(itertools.chain(*a_ ) )
if equal_length:
SCREAMING_SNAKE_CASE__ : Tuple = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE__ : Optional[int] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE__ : int = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
def __lowercase( self : Any , a_ : int=False , a_ : Any=False )-> Union[str, Any]:
"""simple docstring"""
if equal_length:
SCREAMING_SNAKE_CASE__ : str = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE__ : Tuple = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE__ : List[str] = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = SpeechTaFeatureExtractor
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = SpeechTaFeatureExtractionTester(self )
def __lowercase( self : Any , a_ : Optional[int] )-> List[str]:
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_ , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_ , axis=0 ) - 1 ) < 1e-3 ) )
def __lowercase( self : Tuple )-> Dict:
"""simple docstring"""
# Tests that all calls wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE__ : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : Optional[int] = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE__ : List[Any] = feat_extract(a_ , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : List[str] = feat_extract(a_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : int = ['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE__ : Tuple = [None, 1600, None]
for max_length, padding in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE__ : str = feat_extract(a_ , padding=a_ , max_length=a_ , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __lowercase( self : List[Any] )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : List[Any] = range(800 , 1400 , 200 )
SCREAMING_SNAKE_CASE__ : int = [floats_list((1, x) )[0] for x in lengths]
SCREAMING_SNAKE_CASE__ : int = ['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [None, 1600, None]
for max_length, padding in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE__ : List[str] = feat_extract(a_ , max_length=a_ , padding=a_ )
SCREAMING_SNAKE_CASE__ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __lowercase( self : int )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract(
a_ , truncation=a_ , max_length=1000 , padding='max_length' , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __lowercase( self : Optional[Any] )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : List[str] = feat_extract(
a_ , truncation=a_ , max_length=1000 , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
SCREAMING_SNAKE_CASE__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : str = feat_extract(
a_ , truncation=a_ , max_length=2000 , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def __lowercase( self : Any )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.random.rand(100 ).astype(np.floataa )
SCREAMING_SNAKE_CASE__ : int = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE__ : Any = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
SCREAMING_SNAKE_CASE__ : Tuple = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __lowercase( self : Any )-> Optional[int]:
"""simple docstring"""
# Tests that all calls wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : Dict = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
SCREAMING_SNAKE_CASE__ : Optional[int] = feature_extractor(audio_target=a_ , padding=a_ , return_tensors='np' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : int = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE__ : Optional[Any] = feature_extractor(a_ , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : Optional[Any] = feature_extractor(a_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE__ : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE__ : List[str] = np.asarray(a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = feature_extractor(a_ , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : str = feature_extractor(a_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
def __lowercase( self : Dict )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Any = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(a_ ) == len(a_ ) for x, y in zip(a_ , processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE__ : str = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a_ )
SCREAMING_SNAKE_CASE__ : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
SCREAMING_SNAKE_CASE__ : List[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE__ : int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a_ )
SCREAMING_SNAKE_CASE__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Any = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE__ : Optional[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __lowercase( self : Tuple )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : Dict = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ : str = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : List[Any] = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.pad(a_ , padding='longest' , return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE__ : Any = feat_extract.pad(a_ , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def __lowercase( self : Any )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.feat_extract_dict
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feature_extraction_class(**a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ : Any = [len(a_ ) for x in speech_inputs]
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE__ : Any = feat_extract.pad(a_ , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , a_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , a_ )
def __lowercase( self : str )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.feat_extract_dict
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feature_extraction_class(**a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ : Tuple = [len(a_ ) for x in speech_inputs]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Dict = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ : str = min(a_ )
SCREAMING_SNAKE_CASE__ : Any = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE__ : int = feat_extract.pad(
a_ , padding='max_length' , max_length=a_ , truncation=a_ , return_tensors='np' )
self.assertIn('attention_mask' , a_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def __lowercase( self : Optional[int] , a_ : List[str] )-> Any:
"""simple docstring"""
from datasets import load_dataset
SCREAMING_SNAKE_CASE__ : int = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE__ : List[Any] = ds.sort('id' ).select(range(a_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def __lowercase( self : List[str] )-> List[Any]:
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor(
[2.3_8_0_4e-0_3, 2.0_7_5_2e-0_3, 1.9_8_3_6e-0_3, 2.1_0_5_7e-0_3, 1.6_1_7_4e-0_3,
3.0_5_1_8e-0_4, 9.1_5_5_3e-0_5, 3.3_5_6_9e-0_4, 9.7_6_5_6e-0_4, 1.8_3_1_1e-0_3,
2.0_1_4_2e-0_3, 2.1_0_5_7e-0_3, 1.7_3_9_5e-0_3, 4.5_7_7_6e-0_4, -3.9_6_7_3e-0_4,
4.5_7_7_6e-0_4, 1.0_0_7_1e-0_3, 9.1_5_5_3e-0_5, 4.8_8_2_8e-0_4, 1.1_5_9_7e-0_3,
7.3_2_4_2e-0_4, 9.4_6_0_4e-0_4, 1.8_0_0_5e-0_3, 1.8_3_1_1e-0_3, 8.8_5_0_1e-0_4,
4.2_7_2_5e-0_4, 4.8_8_2_8e-0_4, 7.3_2_4_2e-0_4, 1.0_9_8_6e-0_3, 2.1_0_5_7e-0_3] )
# fmt: on
SCREAMING_SNAKE_CASE__ : List[str] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : List[str] = feature_extractor(a_ , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 9_3680) )
self.assertTrue(torch.allclose(input_values[0, :30] , a_ , atol=1e-6 ) )
def __lowercase( self : Tuple )-> List[Any]:
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE__ : int = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : str = feature_extractor(audio_target=a_ , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , a_ , atol=1e-4 ) )
| 636 | 1 |
def _a ( lowercase__ : Tuple , lowercase__ : str , lowercase__ : str , lowercase__ : Dict ):
'''simple docstring'''
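# Classic Tower of Hanoi recursion: move the top height-1 disks to the spare pole, move the
# largest disk to the target pole, then move the height-1 disks back on top of it.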
if height >= 1:
move_tower(height - 1 , lowercase__ , lowercase__ , lowercase__ )
move_disk(lowercase__ , lowercase__ )
move_tower(height - 1 , lowercase__ , lowercase__ , lowercase__ )
def _a ( lowercase__ : Tuple , lowercase__ : Tuple ):
'''simple docstring'''
print('moving disk from' , lowercase__ , 'to' , lowercase__ )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(input('Height of hanoi: ' ).strip() )
move_tower(lowercase__ , 'A' , 'B' , 'C' )
if __name__ == "__main__":
main()
| 636 | import math
import sys
def _a ( lowercase__ : str ):
'''simple docstring'''
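# Read the file as raw bytes and return its contents as a string of '0'/'1' characters,
# eight bits per input byte.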
SCREAMING_SNAKE_CASE__ : Optional[Any] = ''
try:
with open(lowercase__ , 'rb' ) as binary_file:
SCREAMING_SNAKE_CASE__ : Tuple = binary_file.read()
for dat in data:
SCREAMING_SNAKE_CASE__ : Tuple = f'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def _a ( lowercase__ : str ):
'''simple docstring'''
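# LZW-style decoding over a binary alphabet: the lexicon starts from {'0', '1'} and gains a
# new entry for every code that is read; whenever the number of entries reaches a power of
# two, the code width grows and the existing keys are rebuilt to the wider width.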
SCREAMING_SNAKE_CASE__ : List[str] = {'0': '0', '1': '1'}
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = '', ''
SCREAMING_SNAKE_CASE__ : Tuple = len(lowercase__ )
for i in range(len(lowercase__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
SCREAMING_SNAKE_CASE__ : int = lexicon[curr_string]
result += last_match_id
SCREAMING_SNAKE_CASE__ : str = last_match_id + '0'
if math.loga(lowercase__ ).is_integer():
SCREAMING_SNAKE_CASE__ : List[str] = {}
for curr_key in list(lowercase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = lexicon.pop(lowercase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = new_lex
SCREAMING_SNAKE_CASE__ : Any = last_match_id + '1'
index += 1
SCREAMING_SNAKE_CASE__ : Tuple = ''
return result
def _a ( lowercase__ : str , lowercase__ : str ):
'''simple docstring'''
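# Pack the bit string into 8-bit chunks, terminate it with a '1' followed by zero padding,
# and write the resulting bytes to the output file.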
SCREAMING_SNAKE_CASE__ : str = 8
try:
with open(lowercase__ , 'wb' ) as opened_file:
SCREAMING_SNAKE_CASE__ : Dict = [
to_write[i : i + byte_length]
for i in range(0 , len(lowercase__ ) , lowercase__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(lowercase__ , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def _a ( lowercase__ : str ):
'''simple docstring'''
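# Strip the header presumably written by the matching compressor: a run of leading '0' bits,
# a '1' separator, and a field of the same length as the zero run precede the payload bits.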
SCREAMING_SNAKE_CASE__ : Dict = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
SCREAMING_SNAKE_CASE__ : Optional[int] = data_bits[counter:]
SCREAMING_SNAKE_CASE__ : int = data_bits[counter + 1 :]
return data_bits
def _a ( lowercase__ : str , lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = read_file_binary(lowercase__ )
SCREAMING_SNAKE_CASE__ : Dict = remove_prefix(lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = decompress_data(lowercase__ )
write_file_binary(lowercase__ , lowercase__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 636 | 1 |
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
SCREAMING_SNAKE_CASE__ : Tuple = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def _a ( lowercase__ : str = "dhaka" , lowercase__ : int = 5 ):
'''simple docstring'''
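# Scrape Google Images: pull the AF_initDataCallback JSON blob out of the result page's
# <script> tags, extract full-resolution image URLs with regexes, and download up to
# `max_images` of them into a per-query folder.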
SCREAMING_SNAKE_CASE__ : str = min(lowercase__ , 50 ) # Prevent abuse!
SCREAMING_SNAKE_CASE__ : List[str] = {
'q': query,
'tbm': 'isch',
'hl': 'en',
'ijn': '0',
}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = requests.get('https://www.google.com/search' , params=lowercase__ , headers=lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = BeautifulSoup(html.text , 'html.parser' )
SCREAMING_SNAKE_CASE__ : str = ''.join(
re.findall(r'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = json.dumps(lowercase__ )
SCREAMING_SNAKE_CASE__ : Dict = json.loads(lowercase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = re.findall(
r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , lowercase__ , )
if not matched_google_image_data:
return 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.sub(
r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(lowercase__ ) , )
SCREAMING_SNAKE_CASE__ : List[str] = re.findall(
r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , lowercase__ , )
for index, fixed_full_res_image in enumerate(lowercase__ ):
if index >= max_images:
return index
SCREAMING_SNAKE_CASE__ : Dict = bytes(lowercase__ , 'ascii' ).decode(
'unicode-escape' )
SCREAMING_SNAKE_CASE__ : List[Any] = bytes(lowercase__ , 'ascii' ).decode(
'unicode-escape' )
SCREAMING_SNAKE_CASE__ : int = urllib.request.build_opener()
SCREAMING_SNAKE_CASE__ : int = [
(
'User-Agent',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
)
]
urllib.request.install_opener(lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = f'''query_{query.replace(' ' , '_' )}'''
if not os.path.exists(lowercase__ ):
os.makedirs(lowercase__ )
urllib.request.urlretrieve( # noqa: S310
lowercase__ , f'''{path_name}/original_size_img_{index}.jpg''' )
return index
if __name__ == "__main__":
try:
SCREAMING_SNAKE_CASE__ : Optional[Any] = download_images_from_google_query(sys.argv[1])
print(F"""{image_count} images were downloaded to disk.""")
except IndexError:
print("Please provide a search term.")
raise
| 636 | def _a ( lowercase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : List[Any] = set({'(', '[', '{'} )
SCREAMING_SNAKE_CASE__ : Optional[int] = set({')', ']', '}'} )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'{': '}', '[': ']', '(': ')'}
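# Stack-based matching: push every opening bracket and fail as soon as a closing bracket
# arrives with an empty stack or a non-matching opener on top.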
for i in range(len(lowercase__ ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(lowercase__ ) == 0 or (len(lowercase__ ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(lowercase__ ) == 0
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = input('Enter sequence of brackets: ' )
if is_balanced(lowercase__ ):
print(lowercase__ , 'is balanced' )
else:
print(lowercase__ , 'is not balanced' )
if __name__ == "__main__":
main()
| 636 | 1 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class snake_case ( unittest.TestCase ):
def __lowercase( self : Tuple )-> Optional[Any]:
"""simple docstring"""
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE__ : Optional[int] = mock.Mock()
SCREAMING_SNAKE_CASE__ : int = 500
SCREAMING_SNAKE_CASE__ : List[Any] = {}
SCREAMING_SNAKE_CASE__ : List[Any] = HTTPError
SCREAMING_SNAKE_CASE__ : List[str] = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE__ : List[str] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=a_ ) as mock_head:
SCREAMING_SNAKE_CASE__ : Tuple = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def __lowercase( self : List[Any] )-> int:
"""simple docstring"""
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE__ : Tuple = mock.Mock()
SCREAMING_SNAKE_CASE__ : List[str] = 500
SCREAMING_SNAKE_CASE__ : List[Any] = {}
SCREAMING_SNAKE_CASE__ : List[str] = HTTPError
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE__ : List[str] = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=a_ ) as mock_head:
SCREAMING_SNAKE_CASE__ : Any = GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def __lowercase( self : Union[str, Any] )-> str:
"""simple docstring"""
# This test is for deprecated behavior and can be removed in v5
try:
SCREAMING_SNAKE_CASE__ : str = tempfile.mktemp()
with open(a_ , 'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , a_ )
SCREAMING_SNAKE_CASE__ : List[str] = AlbertTokenizer.from_pretrained(a_ )
finally:
os.remove(a_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , a_ )
SCREAMING_SNAKE_CASE__ : int = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def __lowercase( self : Any )-> Dict:
"""simple docstring"""
# This test is for deprecated behavior and can be removed in v5
SCREAMING_SNAKE_CASE__ : List[Any] = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class snake_case ( unittest.TestCase ):
lowercase_ = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def __lowercase( cls : Tuple )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = TOKEN
HfFolder.save_token(a_ )
@classmethod
def __lowercase( cls : Any )-> Dict:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def __lowercase( self : List[Any] )-> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(a_ , 'vocab.txt' )
with open(a_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Tuple = BertTokenizer(a_ )
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(a_ , repo_id='test-tokenizer' , push_to_hub=a_ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Dict = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def __lowercase( self : Optional[int] )-> List[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(a_ , 'vocab.txt' )
with open(a_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Dict = BertTokenizer(a_ )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Optional[Any] = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
a_ , repo_id='valid_org/test-tokenizer-org' , push_to_hub=a_ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : int = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def __lowercase( self : Tuple )-> List[Any]:
"""simple docstring"""
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(a_ , 'vocab.txt' )
with open(a_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : int = CustomTokenizer(a_ )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=a_ )
# Can't make an isinstance check because the tokenizer comes from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(a_ , 'vocab.txt' )
with open(a_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : int = BertTokenizerFast.from_pretrained(a_ )
bert_tokenizer.save_pretrained(a_ )
SCREAMING_SNAKE_CASE__ : Any = CustomTokenizerFast.from_pretrained(a_ )
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=a_ )
# Can't make an isinstance check because the tokenizer comes from the CustomTokenizerFast class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(
F'''{USER}/test-dynamic-tokenizer''' , use_fast=a_ , trust_remote_code=a_ )
# Can't make an isinstance check because the tokenizer comes from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class snake_case ( unittest.TestCase ):
def __lowercase( self : List[str] )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Trie()
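# Added strings are stored as nested per-character dicts, with an empty-string key marking the
# end of a complete word, as the expected structures below show.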
trie.add('Hello 友達' )
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def __lowercase( self : Optional[int] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] )
def __lowercase( self : int )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] )
def __lowercase( self : Tuple )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def __lowercase( self : Dict )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def __lowercase( self : Optional[Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] )
def __lowercase( self : Tuple )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] )
def __lowercase( self : Tuple )-> Optional[Any]:
"""simple docstring"""
# Even if the offsets are wrong, we necessarily output correct string
# parts.
SCREAMING_SNAKE_CASE__ : List[Any] = Trie()
SCREAMING_SNAKE_CASE__ : Optional[int] = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(a_ , ['AB', 'C'] )
| 636 | import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ : List[Any] = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = PegasusTokenizer
lowercase_ = PegasusTokenizerFast
lowercase_ = True
lowercase_ = True
def __lowercase( self : int )-> List[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ : List[Any] = PegasusTokenizer(a_ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowercase( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def __lowercase( self : Any , **a_ : Optional[Any] )-> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **a_ )
def __lowercase( self : Union[str, Any] , a_ : List[Any] )-> Optional[int]:
"""simple docstring"""
return ("This is a test", "This is a test")
def __lowercase( self : Optional[int] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = '</s>'
SCREAMING_SNAKE_CASE__ : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def __lowercase( self : Dict )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
self.assertEqual(len(a_ ) , 1103 )
def __lowercase( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def __lowercase( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Tuple = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
SCREAMING_SNAKE_CASE__ : List[str] = rust_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = py_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0]
self.assertListEqual(a_ , a_ )
def __lowercase( self : Any )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
SCREAMING_SNAKE_CASE__ : Any = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
SCREAMING_SNAKE_CASE__ : List[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer([raw_input_str] , return_tensors=a_ ).input_ids[0]
self.assertListEqual(a_ , a_ )
def __lowercase( self : int )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
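# Sanity-check the Pegasus id layout: ids 0-3 are pad/eos/mask_1/mask_2, and the SentencePiece
# pieces are shifted up by `offset` (103), which puts <unk> at offset + 2 = 105.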
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
SCREAMING_SNAKE_CASE__ : int = 'To ensure a smooth flow of bank resolutions.'
SCREAMING_SNAKE_CASE__ : List[Any] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer([raw_input_str] , return_tensors=a_ ).input_ids[0]
self.assertListEqual(a_ , a_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def __lowercase( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = ['This is going to be way too long.' * 150, 'short example']
SCREAMING_SNAKE_CASE__ : int = ['not super long but more than 5 tokens', 'tiny']
SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer(a_ , padding=a_ , truncation=a_ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : Optional[int] = self._large_tokenizer(
text_target=a_ , max_length=5 , padding=a_ , truncation=a_ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(a_ ) == 2 # input_ids, attention_mask.
@slow
def __lowercase( self : Any )-> str:
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE__ : Optional[int] = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = PegasusTokenizer
lowercase_ = PegasusTokenizerFast
lowercase_ = True
lowercase_ = True
def __lowercase( self : Any )-> Union[str, Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ : Optional[int] = PegasusTokenizer(a_ , offset=0 , mask_token_sent=a_ , mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowercase( self : Optional[Any] )-> List[str]:
"""simple docstring"""
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def __lowercase( self : List[str] , **a_ : Optional[Any] )-> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **a_ )
def __lowercase( self : Optional[Any] , a_ : Tuple )-> str:
"""simple docstring"""
return ("This is a test", "This is a test")
def __lowercase( self : str )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Tuple = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
SCREAMING_SNAKE_CASE__ : str = rust_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0]
SCREAMING_SNAKE_CASE__ : str = py_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0]
self.assertListEqual(a_ , a_ )
@require_torch
def __lowercase( self : List[str] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = ['This is going to be way too long.' * 1000, 'short example']
SCREAMING_SNAKE_CASE__ : Optional[int] = ['not super long but more than 5 tokens', 'tiny']
SCREAMING_SNAKE_CASE__ : str = self._large_tokenizer(a_ , padding=a_ , truncation=a_ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer(
text_target=a_ , max_length=5 , padding=a_ , truncation=a_ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(a_ ) == 2 # input_ids, attention_mask.
def __lowercase( self : Dict )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._large_tokenizer(a_ ).input_ids
self.assertListEqual(
a_ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 636 | 1 |
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class snake_case :
def __init__( self : Any , a_ : int , a_ : Union[str, Any]=3 , a_ : int=7 , a_ : List[Any]=True , a_ : Dict=True , a_ : Union[str, Any]=False , a_ : List[str]=True , a_ : int=99 , a_ : Any=32 , a_ : Tuple=5 , a_ : List[Any]=4 , a_ : str=37 , a_ : int="gelu" , a_ : Any=0.1 , a_ : Optional[Any]=0.1 , a_ : int=512 , a_ : str=16 , a_ : List[str]=2 , a_ : Dict=0.02 , a_ : str=3 , a_ : Optional[Any]=4 , a_ : Optional[Any]=None , )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = parent
SCREAMING_SNAKE_CASE__ : Any = batch_size
SCREAMING_SNAKE_CASE__ : Dict = seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_input_mask
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE__ : Dict = use_labels
SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : int = intermediate_size
SCREAMING_SNAKE_CASE__ : Dict = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Any = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Union[str, Any] = type_vocab_size
SCREAMING_SNAKE_CASE__ : Dict = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = num_labels
SCREAMING_SNAKE_CASE__ : Tuple = num_choices
SCREAMING_SNAKE_CASE__ : Tuple = scope
def __lowercase( self : Union[str, Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : List[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : List[Any] = None
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
SCREAMING_SNAKE_CASE__ : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase( self : List[Any] )-> Dict:
"""simple docstring"""
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a_ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=a_ , )
def __lowercase( self : int , a_ : Tuple , a_ : List[Any] , a_ : str , a_ : str , a_ : Dict , a_ : List[Any] , a_ : Any )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = FalconModel(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ , attention_mask=a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase( self : Tuple , a_ : Optional[Any] , a_ : Tuple , a_ : Any , a_ : Tuple , a_ : Optional[int] , a_ : Optional[int] , a_ : int , a_ : Union[str, Any] , a_ : Dict , )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = True
SCREAMING_SNAKE_CASE__ : Optional[int] = FalconModel(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , )
SCREAMING_SNAKE_CASE__ : Any = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , )
SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , attention_mask=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase( self : Optional[Any] , a_ : List[Any] , a_ : Optional[int] , a_ : int , a_ : Dict , a_ : Optional[int] , a_ : Tuple , a_ : Optional[Any] , a_ : Tuple , a_ : List[str] , )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = FalconForCausalLM(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase( self : Any , a_ : Optional[Any] , a_ : List[str] , a_ : Union[str, Any] , a_ : Any , a_ : Dict , a_ : Any , a_ : Dict , a_ : List[str] , a_ : Optional[Any] , )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
SCREAMING_SNAKE_CASE__ : List[Any] = FalconForCausalLM(config=a_ )
model.to(a_ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE__ : Dict = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , use_cache=a_ , )
SCREAMING_SNAKE_CASE__ : Tuple = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE__ : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
SCREAMING_SNAKE_CASE__ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE__ : List[Any] = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , output_hidden_states=a_ , )['hidden_states'][0]
SCREAMING_SNAKE_CASE__ : List[str] = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , past_key_values=a_ , output_hidden_states=a_ , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1e-3 ) )
def __lowercase( self : int )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) ,
) : Dict = config_and_inputs
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowercase_ = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase_ = (FalconForCausalLM,) if is_torch_available() else ()
lowercase_ = (
{
'feature-extraction': FalconModel,
'text-classification': FalconForSequenceClassification,
'text-generation': FalconForCausalLM,
'question-answering': FalconForQuestionAnswering,
'token-classification': FalconForTokenClassification,
'zero-shot': FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
def __lowercase( self : Tuple )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = FalconModelTester(self )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ConfigTester(self , config_class=a_ , hidden_size=37 )
def __lowercase( self : int )-> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowercase( self : List[str] )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def __lowercase( self : Optional[Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = alibi
self.model_tester.create_and_check_model(a_ , *a_ )
def __lowercase( self : int )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : int = 3
SCREAMING_SNAKE_CASE__ : Optional[int] = input_dict['input_ids']
SCREAMING_SNAKE_CASE__ : List[Any] = input_ids.ne(1 ).to(a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Any = FalconForSequenceClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowercase( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Optional[int] = 3
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'single_label_classification'
SCREAMING_SNAKE_CASE__ : Tuple = input_dict['input_ids']
SCREAMING_SNAKE_CASE__ : str = input_ids.ne(1 ).to(a_ )
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Any = FalconForSequenceClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Any = model(a_ , attention_mask=a_ , labels=a_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowercase( self : List[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE__ : Optional[Any] = FalconForCausalLM(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , use_cache=a_ )
SCREAMING_SNAKE_CASE__ : str = input_ids.shape[0]
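# Falcon's internal "RW" cache format stores keys/values as 3-D tensors; converting to the
# standard 4-D layout and back should be lossless, which the loop below verifies.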
SCREAMING_SNAKE_CASE__ : Optional[int] = model._convert_to_rw_cache(result.past_key_values )
SCREAMING_SNAKE_CASE__ : Tuple = model._convert_cache_to_standard_format(a_ , a_ )
for layer in range(len(a_ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def __lowercase( self : int )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 3
SCREAMING_SNAKE_CASE__ : Tuple = 'multi_label_classification'
SCREAMING_SNAKE_CASE__ : int = input_dict['input_ids']
SCREAMING_SNAKE_CASE__ : int = input_ids.ne(1 ).to(a_ )
SCREAMING_SNAKE_CASE__ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE__ : Any = FalconForSequenceClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple = model(a_ , attention_mask=a_ , labels=a_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowercase( self : Tuple )-> Optional[int]:
"""simple docstring"""
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(a_ , 'use_cache' ):
return
SCREAMING_SNAKE_CASE__ : List[Any] = model_class(a_ ).to(a_ )
if "use_cache" not in inputs:
SCREAMING_SNAKE_CASE__ : str = True
SCREAMING_SNAKE_CASE__ : List[Any] = model(**a_ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
SCREAMING_SNAKE_CASE__ : Any = (
getattr(a_ , 'decoder_layers' , a_ )
or getattr(a_ , 'num_decoder_layers' , a_ )
or config.num_hidden_layers
)
SCREAMING_SNAKE_CASE__ : Dict = getattr(a_ , 'num_kv_heads' , config.num_attention_heads )
SCREAMING_SNAKE_CASE__ : List[Any] = getattr(a_ , 'd_model' , config.hidden_size )
SCREAMING_SNAKE_CASE__ : List[str] = embed_dim // num_attention_heads
SCREAMING_SNAKE_CASE__ : int = outputs['past_key_values']
self.assertEqual(len(a_ ) , a_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = inputs['input_ids'].shape
for i in range(a_ ):
if config.new_decoder_architecture:
SCREAMING_SNAKE_CASE__ : Optional[int] = config.num_attention_heads
elif config.multi_query:
SCREAMING_SNAKE_CASE__ : Dict = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def __lowercase( self : Union[str, Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b' )
SCREAMING_SNAKE_CASE__ : Tuple = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b' )
model.eval()
model.to(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer('My favorite food is' , return_tensors='pt' ).to(a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.generate(**a_ , do_sample=a_ , max_new_tokens=19 )
SCREAMING_SNAKE_CASE__ : str = tokenizer.batch_decode(a_ )[0]
self.assertEqual(a_ , a_ )
@slow
def __lowercase( self : Optional[int] )-> int:
"""simple docstring"""
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoTokenizer.from_pretrained(a_ )
SCREAMING_SNAKE_CASE__ : Dict = FalconForCausalLM.from_pretrained(a_ )
model.eval()
model.to(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer('My favorite food is' , return_tensors='pt' ).to(a_ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**a_ , do_sample=a_ , max_new_tokens=4 )
model.generate(**a_ , do_sample=a_ , max_new_tokens=4 )
model.generate(**a_ , num_beams=2 , max_new_tokens=4 )
@slow
def __lowercase( self : List[Any] )-> Dict:
"""simple docstring"""
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = FalconForCausalLM.from_pretrained(a_ )
model.eval()
model.to(device=a_ )
SCREAMING_SNAKE_CASE__ : Any = tokenizer('My favorite food is' , return_tensors='pt' ).to(a_ )
# Test results are the same with and without cache
SCREAMING_SNAKE_CASE__ : Dict = model.generate(**a_ , do_sample=a_ , max_new_tokens=20 , use_cache=a_ )
SCREAMING_SNAKE_CASE__ : Tuple = model.generate(**a_ , do_sample=a_ , max_new_tokens=20 , use_cache=a_ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 636 | def _a ( lowercase__ : int = 1_00_00_00 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = [i - 1 for i in range(limit + 1 )]
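# Standard totient sieve: phi[i] starts at i - 1, and for each prime i (detected by phi[i] == i - 1)
# every multiple j has phi[j] reduced by phi[j] // i, leaving Euler's phi; the result is sum of phi(d) for 2 <= d <= limit.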
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , i ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
| 636 | 1 |
def _a ( lowercase__ : int ):
'''simple docstring'''
if number < 0:
raise ValueError('number must not be negative' )
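# A power of two has exactly one bit set, so n & (n - 1) clears that bit and leaves 0;
# note that 0 also satisfies this check.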
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 636 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
def _a ( lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any]=False , lowercase__ : str=False , lowercase__ : Dict=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def _a ( lowercase__ : List[str] , lowercase__ : Dict ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
SCREAMING_SNAKE_CASE__ : Dict = 'vilt.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.weight''' )
SCREAMING_SNAKE_CASE__ : Any = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE__ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE__ : Tuple = in_proj_bias[-config.hidden_size :]
def _a ( lowercase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
def _a ( lowercase__ : int , lowercase__ : int , lowercase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = dct.pop(lowercase__ )
SCREAMING_SNAKE_CASE__ : Any = val
@torch.no_grad()
def _a ( lowercase__ : Dict , lowercase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = ViltConfig(image_size=3_84 , patch_size=32 , tie_word_embeddings=lowercase__ )
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : str = False
if "vqa" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Any = True
SCREAMING_SNAKE_CASE__ : str = 31_29
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'huggingface/label-files'
SCREAMING_SNAKE_CASE__ : int = 'vqa2-id2label.json'
SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {int(lowercase__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Dict = idalabel
SCREAMING_SNAKE_CASE__ : str = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : List[str] = ViltForQuestionAnswering(lowercase__ )
elif "nlvr" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Optional[int] = True
SCREAMING_SNAKE_CASE__ : List[str] = 2
SCREAMING_SNAKE_CASE__ : Dict = {0: 'False', 1: 'True'}
SCREAMING_SNAKE_CASE__ : Dict = {v: k for k, v in config.idalabel.items()}
SCREAMING_SNAKE_CASE__ : Tuple = 3
SCREAMING_SNAKE_CASE__ : int = ViltForImagesAndTextClassification(lowercase__ )
elif "irtr" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : str = ViltForImageAndTextRetrieval(lowercase__ )
elif "mlm_itm" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : int = True
SCREAMING_SNAKE_CASE__ : Optional[int] = ViltForMaskedLM(lowercase__ )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE__ : Any = torch.hub.load_state_dict_from_url(lowercase__ , map_location='cpu' )['state_dict']
SCREAMING_SNAKE_CASE__ : Any = create_rename_keys(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
read_in_q_k_v(lowercase__ , lowercase__ )
if mlm_model or irtr_model:
SCREAMING_SNAKE_CASE__ : Any = ['itm_score.fc.weight', 'itm_score.fc.bias']
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = model.load_state_dict(lowercase__ , strict=lowercase__ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(lowercase__ )
# Define processor
SCREAMING_SNAKE_CASE__ : str = ViltImageProcessor(size=3_84 )
SCREAMING_SNAKE_CASE__ : List[Any] = BertTokenizer.from_pretrained('bert-base-uncased' )
SCREAMING_SNAKE_CASE__ : List[Any] = ViltProcessor(lowercase__ , lowercase__ )
# Forward pass on example inputs (image + text)
if nlvr_model:
SCREAMING_SNAKE_CASE__ : List[str] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=lowercase__ ).raw )
SCREAMING_SNAKE_CASE__ : Any = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=lowercase__ ).raw )
SCREAMING_SNAKE_CASE__ : Tuple = (
'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
' standing.'
)
SCREAMING_SNAKE_CASE__ : List[Any] = processor(lowercase__ , lowercase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : List[str] = processor(lowercase__ , lowercase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : List[Any] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
SCREAMING_SNAKE_CASE__ : Tuple = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=lowercase__ ).raw )
if mlm_model:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'a bunch of [MASK] laying on a [MASK].'
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'How many cats are there?'
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor(lowercase__ , lowercase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : str = model(**lowercase__ )
# Verify outputs
if mlm_model:
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Size([1, 11, 3_05_22] )
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , lowercase__ , atol=1E-4 )
# verify masked token prediction equals "cats"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
SCREAMING_SNAKE_CASE__ : str = torch.Size([1, 31_29] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([-15.9495, -18.1472, -10.3041] )
assert torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , lowercase__ , atol=1E-4 )
# verify vqa prediction equals "2"
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.Size([1, 2] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([-2.8721, 2.1291] )
assert torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase__ )
processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 636 | 1 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
SCREAMING_SNAKE_CASE__ : int = "bert-base-cased"
SCREAMING_SNAKE_CASE__ : List[Any] = "google/pegasus-xsum"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [" Sam ate lunch today.", "Sams lunch ingredients."]
SCREAMING_SNAKE_CASE__ : Any = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
SCREAMING_SNAKE_CASE__ : str = "patrickvonplaten/t5-tiny-random"
SCREAMING_SNAKE_CASE__ : Tuple = "sshleifer/bart-tiny-random"
SCREAMING_SNAKE_CASE__ : Any = "sshleifer/tiny-mbart"
SCREAMING_SNAKE_CASE__ : Optional[Any] = "sshleifer/tiny-marian-en-de"
def _a ( lowercase__ : Path , lowercase__ : list ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = '\n'.join(lowercase__ )
Path(lowercase__ ).open('w' ).writelines(lowercase__ )
def _a ( lowercase__ : List[Any] ):
'''simple docstring'''
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(lowercase__ , f'''{split}.source''' ) , lowercase__ )
_dump_articles(os.path.join(lowercase__ , f'''{split}.target''' ) , lowercase__ )
return tmp_dir
class snake_case ( UpperCamelCase_ ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __lowercase( self : Optional[Any] , a_ : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoTokenizer.from_pretrained(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
SCREAMING_SNAKE_CASE__ : int = max(len(tokenizer.encode(a_ ) ) for a in ARTICLES )
SCREAMING_SNAKE_CASE__ : str = max(len(tokenizer.encode(a_ ) ) for a in SUMMARIES )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 4
SCREAMING_SNAKE_CASE__ : Dict = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error.
SCREAMING_SNAKE_CASE__ : Optional[int] = SeqaSeqDataset(
a_ , data_dir=a_ , type_path='train' , max_source_length=a_ , max_target_length=a_ , src_lang=a_ , tgt_lang=a_ , )
SCREAMING_SNAKE_CASE__ : Any = DataLoader(a_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(a_ , a_ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
SCREAMING_SNAKE_CASE__ : Tuple = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __lowercase( self : Union[str, Any] , a_ : List[str] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoTokenizer.from_pretrained(a_ )
SCREAMING_SNAKE_CASE__ : int = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
SCREAMING_SNAKE_CASE__ : Any = max(len(tokenizer.encode(a_ ) ) for a in ARTICLES )
SCREAMING_SNAKE_CASE__ : Any = max(len(tokenizer.encode(a_ ) ) for a in SUMMARIES )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 4
SCREAMING_SNAKE_CASE__ : str = LegacySeqaSeqDataset(
a_ , data_dir=a_ , type_path='train' , max_source_length=20 , max_target_length=a_ , )
SCREAMING_SNAKE_CASE__ : List[Any] = DataLoader(a_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __lowercase( self : Optional[int] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
SCREAMING_SNAKE_CASE__ : Any = tmp_dir.joinpath('train.source' ).open().readlines()
SCREAMING_SNAKE_CASE__ : Optional[int] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(a_ , a_ , 128 , a_ )
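# pack_data_dir greedily concatenates source lines up to 128 tokens, so the packed split ends up
# with fewer (here a single) longer examples while keeping the same total text and file layout.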
SCREAMING_SNAKE_CASE__ : Optional[int] = {x.name for x in tmp_dir.iterdir()}
SCREAMING_SNAKE_CASE__ : Dict = {x.name for x in save_dir.iterdir()}
SCREAMING_SNAKE_CASE__ : List[Any] = save_dir.joinpath('train.source' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(a_ ) < len(a_ )
assert len(a_ ) == 1
assert len(packed_examples[0] ) == sum(len(a_ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
def __lowercase( self : int )-> Union[str, Any]:
"""simple docstring"""
if not FAIRSEQ_AVAILABLE:
return
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self._get_dataset(max_len=64 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 64
SCREAMING_SNAKE_CASE__ : int = ds.make_dynamic_sampler(a_ , required_batch_size_multiple=a_ )
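# The dynamic sampler builds variable-sized batches capped by the token budget; the checks below
# confirm batch sizes vary, every example is used exactly once, and no batch overshoots max_tokens.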
SCREAMING_SNAKE_CASE__ : Tuple = [len(a_ ) for x in batch_sampler]
assert len(set(a_ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(a_ ) == len(a_ ) # no dropped or added examples
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DataLoader(a_ , batch_sampler=a_ , collate_fn=ds.collate_fn , num_workers=2 )
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : List[Any] = []
for batch in data_loader:
SCREAMING_SNAKE_CASE__ : List[Any] = batch['input_ids'].shape
SCREAMING_SNAKE_CASE__ : int = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
SCREAMING_SNAKE_CASE__ : Any = np.product(batch['input_ids'].shape )
num_src_per_batch.append(a_ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(a_ )
assert num_src_per_batch[0] == max(a_ )
if failures:
raise AssertionError(F'''too many tokens in {len(a_ )} batches''' )
def __lowercase( self : Dict )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self._get_dataset(max_len=512 )
SCREAMING_SNAKE_CASE__ : int = 2
SCREAMING_SNAKE_CASE__ : List[Any] = ds.make_sortish_sampler(a_ , shuffle=a_ )
SCREAMING_SNAKE_CASE__ : List[str] = DataLoader(a_ , batch_size=a_ , collate_fn=ds.collate_fn , num_workers=2 )
SCREAMING_SNAKE_CASE__ : List[str] = DataLoader(a_ , batch_size=a_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=a_ )
SCREAMING_SNAKE_CASE__ : str = tokenizer.pad_token_id
def count_pad_tokens(a_ : int , a_ : Dict="input_ids" ):
return [batch[k].eq(a_ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(a_ , k='labels' ) ) < sum(count_pad_tokens(a_ , k='labels' ) )
assert sum(count_pad_tokens(a_ ) ) < sum(count_pad_tokens(a_ ) )
assert len(a_ ) == len(a_ )
def __lowercase( self : Optional[Any] , a_ : Union[str, Any]=1000 , a_ : Tuple=128 )-> Optional[int]:
"""simple docstring"""
if os.getenv('USE_REAL_DATA' , a_ ):
SCREAMING_SNAKE_CASE__ : Tuple = 'examples/seq2seq/wmt_en_ro'
SCREAMING_SNAKE_CASE__ : Tuple = max_len * 2 * 64
if not Path(a_ ).joinpath('train.len' ).exists():
save_len_file(a_ , a_ )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = 'examples/seq2seq/test_data/wmt_en_ro'
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_len * 4
save_len_file(a_ , a_ )
SCREAMING_SNAKE_CASE__ : str = AutoTokenizer.from_pretrained(a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = SeqaSeqDataset(
a_ , data_dir=a_ , type_path='train' , max_source_length=a_ , max_target_length=a_ , n_obs=a_ , )
return ds, max_tokens, tokenizer
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self._get_dataset()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = set(DistributedSortishSampler(a_ , 256 , num_replicas=2 , rank=0 , add_extra_examples=a_ ) )
SCREAMING_SNAKE_CASE__ : Any = set(DistributedSortishSampler(a_ , 256 , num_replicas=2 , rank=1 , add_extra_examples=a_ ) )
assert idsa.intersection(a_ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __lowercase( self : Union[str, Any] , a_ : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained(a_ , use_fast=a_ )
if tok_name == MBART_TINY:
SCREAMING_SNAKE_CASE__ : Any = SeqaSeqDataset(
a_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
SCREAMING_SNAKE_CASE__ : Optional[int] = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = SeqaSeqDataset(
a_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , )
SCREAMING_SNAKE_CASE__ : Tuple = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(a_ ) == 1 if tok_name == BART_TINY else len(a_ ) == 0
| 636 | from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class snake_case :
lowercase_ = 42 # [batch_size x 3]
lowercase_ = 42 # [batch_size x 3]
lowercase_ = 42 # [batch_size x 3]
lowercase_ = 42 # [batch_size x 3]
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
def __lowercase( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def __lowercase( self : Dict )-> Tuple:
"""simple docstring"""
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def __lowercase( self : Dict )-> Union[str, Any]:
"""simple docstring"""
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def __lowercase( self : Tuple )-> torch.Tensor:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = torch.arange(self.height * self.width )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.stack(
[
pixel_indices % self.width,
torch.div(a_ , self.width , rounding_mode='trunc' ),
] , axis=1 , )
return coords
@property
def __lowercase( self : Any )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.shape
SCREAMING_SNAKE_CASE__ : Tuple = int(np.prod(a_ ) )
SCREAMING_SNAKE_CASE__ : List[str] = self.get_image_coords()
SCREAMING_SNAKE_CASE__ : Dict = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
SCREAMING_SNAKE_CASE__ : Any = self.get_camera_rays(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = rays.view(a_ , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def __lowercase( self : Optional[Any] , a_ : torch.Tensor )-> torch.Tensor:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
SCREAMING_SNAKE_CASE__ : str = coords.view(a_ , -1 , 2 )
SCREAMING_SNAKE_CASE__ : List[Any] = self.resolution()
SCREAMING_SNAKE_CASE__ : str = self.fov()
SCREAMING_SNAKE_CASE__ : Any = (flat.float() / (res - 1)) * 2 - 1
SCREAMING_SNAKE_CASE__ : Any = fracs * torch.tan(fov / 2 )
SCREAMING_SNAKE_CASE__ : List[str] = fracs.view(a_ , -1 , 2 )
SCREAMING_SNAKE_CASE__ : str = (
self.z.view(a_ , 1 , 3 )
+ self.x.view(a_ , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(a_ , 1 , 3 ) * fracs[:, :, 1:]
)
SCREAMING_SNAKE_CASE__ : Tuple = directions / directions.norm(dim=-1 , keepdim=a_ )
SCREAMING_SNAKE_CASE__ : Any = torch.stack(
[
torch.broadcast_to(self.origin.view(a_ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(a_ , *a_ , 2 , 3 )
def __lowercase( self : Optional[int] , a_ : int , a_ : int )-> "DifferentiableProjectiveCamera":
"""simple docstring"""
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=a_ , height=a_ , x_fov=self.x_fov , y_fov=self.y_fov , )
def _a ( lowercase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : str = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([np.sin(lowercase__ ), np.cos(lowercase__ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
SCREAMING_SNAKE_CASE__ : Tuple = -z * 4
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([np.cos(lowercase__ ), -np.sin(lowercase__ ), 0.0] )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.cross(lowercase__ , lowercase__ )
origins.append(lowercase__ )
xs.append(lowercase__ )
ys.append(lowercase__ )
zs.append(lowercase__ )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(lowercase__ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowercase__ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowercase__ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowercase__ , axis=0 ) ).float() , width=lowercase__ , height=lowercase__ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowercase__ )) , )
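# A hedged usage sketch for the factory above (not part of the original file).
# It appears to build 20 cameras panning around the origin; `create_pan_cameras`
# and `camera_rays` are assumed names for the obfuscated `_a` / property above.
#   cameras = create_pan_cameras(64)     # 64x64 viewports, shape=(1, 20)
#   rays = cameras.camera_rays           # expected shape: [1, 20 * 64 * 64, 2, 3]
#   origins, directions = rays[..., 0, :], rays[..., 1, :]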
| 636 | 1 |
def _a ( lowercase__ : Tuple , lowercase__ : int , lowercase__ : List[Any]=False ):
'''simple docstring'''
if isinstance(lowercase__ , lowercase__ ) and isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = len(set_a.intersection(lowercase__ ) )
if alternative_union:
SCREAMING_SNAKE_CASE__ : int = len(lowercase__ ) + len(lowercase__ )
else:
SCREAMING_SNAKE_CASE__ : Tuple = len(set_a.union(lowercase__ ) )
return intersection / union
if isinstance(lowercase__ , (list, tuple) ) and isinstance(lowercase__ , (list, tuple) ):
SCREAMING_SNAKE_CASE__ : Any = [element for element in set_a if element in set_b]
if alternative_union:
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(lowercase__ ) + len(lowercase__ )
return len(lowercase__ ) / union
else:
SCREAMING_SNAKE_CASE__ : Any = set_a + [element for element in set_b if element not in set_a]
return len(lowercase__ ) / len(lowercase__ )
return len(lowercase__ ) / len(lowercase__ )
return None
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[Any] = {"a", "b", "c", "d", "e"}
SCREAMING_SNAKE_CASE__ : Tuple = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
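# Hedged usage sketch (assuming `jaccard_similarity` is the public name of the
# helper defined above as `_a`): it also accepts lists/tuples, and a third flag
# switches the denominator to len(a) + len(b) instead of the true union size.
#   jaccard_similarity({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"})        # 3 / 8 = 0.375
#   jaccard_similarity({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"}, True)  # 3 / 11
#   jaccard_similarity(["a", "b", "c"], ["b", "c", "d"])                                 # 2 / 4 = 0.5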
| 636 | import requests
SCREAMING_SNAKE_CASE__ : int = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(f'''{i}.) {article['title']}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 636 | 1 |
import math
import sys
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = ''
try:
with open(lowercase__ , 'rb' ) as binary_file:
SCREAMING_SNAKE_CASE__ : Tuple = binary_file.read()
for dat in data:
SCREAMING_SNAKE_CASE__ : Tuple = f'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = {'0': '0', '1': '1'}
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = '', ''
SCREAMING_SNAKE_CASE__ : Tuple = len(lowercase__ )
for i in range(len(lowercase__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
SCREAMING_SNAKE_CASE__ : int = lexicon[curr_string]
result += last_match_id
SCREAMING_SNAKE_CASE__ : str = last_match_id + '0'
if math.loga(lowercase__ ).is_integer():
SCREAMING_SNAKE_CASE__ : List[str] = {}
for curr_key in list(lowercase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = lexicon.pop(lowercase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = new_lex
SCREAMING_SNAKE_CASE__ : Any = last_match_id + '1'
index += 1
SCREAMING_SNAKE_CASE__ : Tuple = ''
return result
def _a ( lowercase__ : str , lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = 8
try:
with open(lowercase__ , 'wb' ) as opened_file:
SCREAMING_SNAKE_CASE__ : Dict = [
to_write[i : i + byte_length]
for i in range(0 , len(lowercase__ ) , lowercase__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(lowercase__ , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
SCREAMING_SNAKE_CASE__ : Optional[int] = data_bits[counter:]
SCREAMING_SNAKE_CASE__ : int = data_bits[counter + 1 :]
return data_bits
def _a ( lowercase__ : str , lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = read_file_binary(lowercase__ )
SCREAMING_SNAKE_CASE__ : Dict = remove_prefix(lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = decompress_data(lowercase__ )
write_file_binary(lowercase__ , lowercase__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
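# Hedged usage sketch (assuming the entry point keeps the name `compress` used in
# the __main__ guard above): the script is a CLI that reads an LZW-compressed file
# and writes the decompressed bits back out, e.g.
#   python lzw_decompress.py compressed.bin restored.bin
# where argv[1] is the source path and argv[2] the destination path.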
| 636 | import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger()
@dataclass
class snake_case :
lowercase_ = 42
lowercase_ = field(default_factory=UpperCamelCase_ )
lowercase_ = field(default_factory=UpperCamelCase_ )
def __lowercase( self : Dict , a_ : Dict , a_ : Tensor , a_ : Tensor )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = len(list(m.modules() ) ) == 1 or isinstance(a_ , nn.Convad ) or isinstance(a_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(a_ )
def __call__( self : Tuple , a_ : Tensor )-> Any:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(a_ )
[x.remove() for x in self.handles]
return self
@property
def __lowercase( self : Tuple )-> int:
"""simple docstring"""
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda a_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class snake_case :
lowercase_ = 42
lowercase_ = 42
lowercase_ = 1
lowercase_ = field(default_factory=UpperCamelCase_ )
lowercase_ = field(default_factory=UpperCamelCase_ )
lowercase_ = True
def __call__( self : List[Any] , a_ : Tensor )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = Tracker(self.dest )(a_ ).parametrized
SCREAMING_SNAKE_CASE__ : Optional[int] = Tracker(self.src )(a_ ).parametrized
SCREAMING_SNAKE_CASE__ : List[str] = list(filter(lambda a_ : type(a_ ) not in self.src_skip , a_ ) )
SCREAMING_SNAKE_CASE__ : Dict = list(filter(lambda a_ : type(a_ ) not in self.dest_skip , a_ ) )
if len(a_ ) != len(a_ ) and self.raise_if_mismatch:
raise Exception(
F'''Numbers of operations are different. Source module has {len(a_ )} operations while'''
F''' destination module has {len(a_ )}.''' )
for dest_m, src_m in zip(a_ , a_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'''Transfered from={src_m} to={dest_m}''' )
class snake_case ( nn.Module ):
def __init__( self : List[Any] , a_ : nn.Module )-> Dict:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(('conv1', model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('block' ), F'''Unexpected layer name {k}'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(a_ ) + 1
feature_blocks.append((F'''res{block_index}''', v) )
SCREAMING_SNAKE_CASE__ : Any = nn.ModuleDict(a_ )
def __lowercase( self : Tuple , a_ : Tensor )-> Dict:
"""simple docstring"""
return get_trunk_forward_outputs(
a_ , out_feat_keys=a_ , feature_blocks=self._feature_blocks , )
class snake_case ( UpperCamelCase_ ):
def __lowercase( self : Optional[Any] , a_ : str )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = x.split('-' )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self : Union[str, Any] , a_ : str )-> Callable[[], Tuple[nn.Module, Dict]]:
"""simple docstring"""
# default to timm!
if x not in self:
SCREAMING_SNAKE_CASE__ : Any = self.convert_name_to_timm(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = partial(lambda: (timm.create_model(a_ , pretrained=a_ ).eval(), None) )
else:
SCREAMING_SNAKE_CASE__ : List[str] = super().__getitem__(a_ )
return val
class snake_case ( UpperCamelCase_ ):
def __getitem__( self : Any , a_ : str )-> Callable[[], nn.Module]:
"""simple docstring"""
if "seer" in x and "in1k" not in x:
SCREAMING_SNAKE_CASE__ : Any = RegNetModel
else:
SCREAMING_SNAKE_CASE__ : Any = RegNetForImageClassification
return val
def _a ( lowercase__ : Any , lowercase__ : Optional[Any] , lowercase__ : List[Tuple[str, str]] ):
'''simple docstring'''
for from_key, to_key in keys:
SCREAMING_SNAKE_CASE__ : Tuple = from_state_dict[from_key].clone()
print(f'''Copied key={from_key} to={to_key}''' )
return to_state_dict
def _a ( lowercase__ : str , lowercase__ : Callable[[], nn.Module] , lowercase__ : Callable[[], nn.Module] , lowercase__ : RegNetConfig , lowercase__ : Path , lowercase__ : bool = True , ):
'''simple docstring'''
print(f'''Converting {name}...''' )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = from_model_func()
SCREAMING_SNAKE_CASE__ : int = our_model_func(lowercase__ ).eval()
SCREAMING_SNAKE_CASE__ : List[Any] = ModuleTransfer(src=lowercase__ , dest=lowercase__ , raise_if_mismatch=lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.randn((1, 3, 2_24, 2_24) )
module_transfer(lowercase__ )
if from_state_dict is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
SCREAMING_SNAKE_CASE__ : int = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
SCREAMING_SNAKE_CASE__ : Optional[Any] = manually_copy_vissl_head(lowercase__ , our_model.state_dict() , lowercase__ )
our_model.load_state_dict(lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = our_model(lowercase__ , output_hidden_states=lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = (
our_outputs.logits if isinstance(lowercase__ , lowercase__ ) else our_outputs.last_hidden_state
)
SCREAMING_SNAKE_CASE__ : List[Any] = from_model(lowercase__ )
SCREAMING_SNAKE_CASE__ : List[str] = from_output[-1] if type(lowercase__ ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
SCREAMING_SNAKE_CASE__ : List[Any] = our_outputs.hidden_states[-1]
assert torch.allclose(lowercase__ , lowercase__ ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add model' , use_temp_dir=lowercase__ , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 2_24 if 'seer' not in name else 3_84
# we can use the convnext one
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' , size=lowercase__ )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add image processor' , use_temp_dir=lowercase__ , )
print(f'''Pushed {name}''' )
def _a ( lowercase__ : Path , lowercase__ : str = None , lowercase__ : bool = True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = 'imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE__ : Tuple = 10_00
SCREAMING_SNAKE_CASE__ : Tuple = (1, num_labels)
SCREAMING_SNAKE_CASE__ : str = 'huggingface/label-files'
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE__ : List[str] = json.load(open(cached_download(hf_hub_url(lowercase__ , lowercase__ , repo_type='dataset' ) ) , 'r' ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {int(lowercase__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : str = idalabel
SCREAMING_SNAKE_CASE__ : Tuple = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Any = partial(lowercase__ , num_labels=lowercase__ , idalabel=lowercase__ , labelaid=lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 , layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 1_60, 3_84] , groups_width=16 , layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 2_40, 5_28] , groups_width=24 , layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 1_28, 2_88, 6_72] , groups_width=16 , layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 1_68, 4_08, 9_12] , groups_width=24 , layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 1_92, 4_32, 10_08] , groups_width=48 , layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 2_40, 5_60, 13_60] , groups_width=40 , layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 3_92, 7_84, 16_24] , groups_width=56 , layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 2_40, 7_20, 19_20] , groups_width=1_20 , layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 , layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[2_56, 5_12, 8_96, 20_48] , groups_width=1_28 , layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[3_36, 6_72, 13_44, 25_20] , groups_width=1_68 , layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 1_04, 2_08, 4_40] , groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 1_12, 2_56, 6_08] , groups_width=16 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 1_28, 3_20, 7_68] , groups_width=16 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 1_20, 3_36, 8_88] , groups_width=24 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 2_16, 5_76, 15_12] , groups_width=24 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[1_28, 1_92, 5_12, 10_88] , groups_width=64 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[1_44, 2_88, 5_76, 12_96] , groups_width=72 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 4_48, 8_96, 20_16] , groups_width=56 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[2_24, 4_48, 12_32, 30_24] , groups_width=1_12 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
}
SCREAMING_SNAKE_CASE__ : List[Any] = NameToOurModelFuncMap()
SCREAMING_SNAKE_CASE__ : Dict = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(lowercase__ : str , lowercase__ : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
SCREAMING_SNAKE_CASE__ : str = torch.hub.load_state_dict_from_url(lowercase__ , model_dir=str(lowercase__ ) , map_location='cpu' )
SCREAMING_SNAKE_CASE__ : Tuple = model_func()
# check if we have a head, if yes add it
SCREAMING_SNAKE_CASE__ : str = files['classy_state_dict']['base_model']['model']
SCREAMING_SNAKE_CASE__ : str = model_state_dict['trunk']
model.load_state_dict(lowercase__ )
return model.eval(), model_state_dict["heads"]
# pretrained
SCREAMING_SNAKE_CASE__ : Any = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE__ : int = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE__ : List[Any] = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
SCREAMING_SNAKE_CASE__ : Optional[int] = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=10_10 , w_0=17_44 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
SCREAMING_SNAKE_CASE__ : List[Any] = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE__ : Optional[int] = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
SCREAMING_SNAKE_CASE__ : Any = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=10_10 , w_0=17_44 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
lowercase__ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , lowercase__ , lowercase__ , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
lowercase__ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , lowercase__ , lowercase__ , lowercase__ , )
return config, expected_shape
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 636 | 1 |
def _a ( lowercase__ : int = 10**9 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = 1
SCREAMING_SNAKE_CASE__ : List[str] = 2
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
SCREAMING_SNAKE_CASE__ : Any = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
SCREAMING_SNAKE_CASE__ : Tuple = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
| 636 | import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class snake_case ( UpperCamelCase_ ):
lowercase_ = ['image_processor', 'tokenizer']
lowercase_ = 'OwlViTImageProcessor'
lowercase_ = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : List[str] , a_ : List[Any]=None , a_ : str=None , **a_ : Any )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , a_ , )
SCREAMING_SNAKE_CASE__ : Tuple = kwargs.pop('feature_extractor' )
SCREAMING_SNAKE_CASE__ : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(a_ , a_ )
def __call__( self : Any , a_ : Optional[int]=None , a_ : Tuple=None , a_ : List[Any]=None , a_ : Tuple="max_length" , a_ : str="np" , **a_ : Any )-> int:
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(a_ , a_ ) or (isinstance(a_ , a_ ) and not isinstance(text[0] , a_ )):
SCREAMING_SNAKE_CASE__ : Tuple = [self.tokenizer(a_ , padding=a_ , return_tensors=a_ , **a_ )]
elif isinstance(a_ , a_ ) and isinstance(text[0] , a_ ):
SCREAMING_SNAKE_CASE__ : Any = []
# Maximum number of queries across batch
SCREAMING_SNAKE_CASE__ : str = max([len(a_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(a_ ) != max_num_queries:
SCREAMING_SNAKE_CASE__ : Tuple = t + [' '] * (max_num_queries - len(a_ ))
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer(a_ , padding=a_ , return_tensors=a_ , **a_ )
encodings.append(a_ )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
SCREAMING_SNAKE_CASE__ : Dict = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ : List[Any] = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ : int = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
SCREAMING_SNAKE_CASE__ : str = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ : Dict = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
SCREAMING_SNAKE_CASE__ : Optional[int] = BatchEncoding()
SCREAMING_SNAKE_CASE__ : List[str] = input_ids
SCREAMING_SNAKE_CASE__ : Tuple = attention_mask
if query_images is not None:
SCREAMING_SNAKE_CASE__ : Any = BatchEncoding()
SCREAMING_SNAKE_CASE__ : Dict = self.image_processor(
a_ , return_tensors=a_ , **a_ ).pixel_values
SCREAMING_SNAKE_CASE__ : Dict = query_pixel_values
if images is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor(a_ , return_tensors=a_ , **a_ )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ : Dict = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def __lowercase( self : str , *a_ : List[str] , **a_ : int )-> List[Any]:
"""simple docstring"""
return self.image_processor.post_process(*a_ , **a_ )
def __lowercase( self : Tuple , *a_ : List[str] , **a_ : str )-> Union[str, Any]:
"""simple docstring"""
return self.image_processor.post_process_object_detection(*a_ , **a_ )
def __lowercase( self : Optional[Any] , *a_ : str , **a_ : Dict )-> Optional[int]:
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*a_ , **a_ )
def __lowercase( self : Optional[int] , *a_ : Tuple , **a_ : Tuple )-> Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*a_ , **a_ )
def __lowercase( self : Tuple , *a_ : Tuple , **a_ : Tuple )-> List[str]:
"""simple docstring"""
return self.tokenizer.decode(*a_ , **a_ )
@property
def __lowercase( self : Tuple )-> Any:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , a_ , )
return self.image_processor_class
@property
def __lowercase( self : List[Any] )-> List[str]:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , a_ , )
return self.image_processor
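# A minimal, hedged usage sketch (assumes the class above is exposed as
# OwlViTProcessor and that the referenced Hub checkpoint is available):
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                      images=image, return_tensors="pt")
#   # inputs then carries input_ids, attention_mask and pixel_values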
| 636 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[str] = {
"Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class snake_case ( UpperCamelCase_ ):
lowercase_ = 'marian'
lowercase_ = ['past_key_values']
lowercase_ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Dict , a_ : Optional[Any]=5_8101 , a_ : Optional[Any]=None , a_ : Optional[int]=1024 , a_ : Optional[Any]=12 , a_ : int=4096 , a_ : List[str]=16 , a_ : Tuple=12 , a_ : Union[str, Any]=4096 , a_ : Dict=16 , a_ : List[Any]=0.0 , a_ : Optional[int]=0.0 , a_ : Optional[Any]=True , a_ : Union[str, Any]=True , a_ : Union[str, Any]="gelu" , a_ : Optional[int]=1024 , a_ : Dict=0.1 , a_ : List[Any]=0.0 , a_ : Optional[Any]=0.0 , a_ : Any=0.02 , a_ : Union[str, Any]=5_8100 , a_ : str=False , a_ : Optional[Any]=5_8100 , a_ : int=0 , a_ : Optional[Any]=0 , a_ : Union[str, Any]=True , **a_ : int , )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = vocab_size
SCREAMING_SNAKE_CASE__ : Any = decoder_vocab_size or vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE__ : int = encoder_ffn_dim
SCREAMING_SNAKE_CASE__ : int = encoder_layers
SCREAMING_SNAKE_CASE__ : str = encoder_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : Any = decoder_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dropout
SCREAMING_SNAKE_CASE__ : Any = attention_dropout
SCREAMING_SNAKE_CASE__ : List[Any] = activation_dropout
SCREAMING_SNAKE_CASE__ : str = activation_function
SCREAMING_SNAKE_CASE__ : Dict = init_std
SCREAMING_SNAKE_CASE__ : List[str] = encoder_layerdrop
SCREAMING_SNAKE_CASE__ : Tuple = decoder_layerdrop
SCREAMING_SNAKE_CASE__ : int = use_cache
SCREAMING_SNAKE_CASE__ : Any = encoder_layers
SCREAMING_SNAKE_CASE__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE__ : Dict = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=a_ , eos_token_id=a_ , is_encoder_decoder=a_ , decoder_start_token_id=a_ , forced_eos_token_id=a_ , **a_ , )
class snake_case ( UpperCamelCase_ ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def __lowercase( self : Optional[Any] )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ : List[str] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE__ : List[Any] = {0: 'batch'}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {0: 'batch', 1: 'decoder_sequence'}
SCREAMING_SNAKE_CASE__ : str = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(a_ , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE__ : Optional[int] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.num_layers
for i in range(a_ ):
SCREAMING_SNAKE_CASE__ : List[Any] = {0: 'batch', 2: 'past_sequence + sequence'}
SCREAMING_SNAKE_CASE__ : str = {0: 'batch', 2: 'past_sequence + sequence'}
else:
SCREAMING_SNAKE_CASE__ : Dict = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def __lowercase( self : Union[str, Any] )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ : Tuple = super().outputs
else:
SCREAMING_SNAKE_CASE__ : List[Any] = super(a_ , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.num_layers
for i in range(a_ ):
SCREAMING_SNAKE_CASE__ : int = {0: 'batch', 2: 'past_sequence + sequence'}
SCREAMING_SNAKE_CASE__ : Tuple = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def __lowercase( self : Dict , a_ : PreTrainedTokenizer , a_ : int = -1 , a_ : int = -1 , a_ : bool = False , a_ : Optional[TensorType] = None , )-> Mapping[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self._generate_dummy_inputs_for_encoder_and_decoder(
a_ , a_ , a_ , a_ , a_ )
# Generate decoder inputs
SCREAMING_SNAKE_CASE__ : Tuple = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
a_ , a_ , a_ , a_ , a_ )
SCREAMING_SNAKE_CASE__ : List[str] = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE__ : str = dict(**a_ , **a_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = common_inputs['input_ids'].shape
SCREAMING_SNAKE_CASE__ : str = common_inputs['decoder_input_ids'].shape[1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.num_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = decoder_seq_length + 3
SCREAMING_SNAKE_CASE__ : Tuple = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(a_ , a_ )] , dim=1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self.num_layers
SCREAMING_SNAKE_CASE__ : List[str] = min(a_ , a_ )
SCREAMING_SNAKE_CASE__ : str = max(a_ , a_ ) - min_num_layers
SCREAMING_SNAKE_CASE__ : List[Any] = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(a_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(a_ ),
torch.zeros(a_ ),
torch.zeros(a_ ),
torch.zeros(a_ ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE__ : int = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(a_ , a_ ):
common_inputs["past_key_values"].append((torch.zeros(a_ ), torch.zeros(a_ )) )
return common_inputs
def __lowercase( self : Any , a_ : PreTrainedTokenizer , a_ : int = -1 , a_ : int = -1 , a_ : bool = False , a_ : Optional[TensorType] = None , )-> Mapping[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self._generate_dummy_inputs_for_encoder_and_decoder(
a_ , a_ , a_ , a_ , a_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE__ : List[Any] = seqlen + 2
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.num_layers
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE__ : str = common_inputs['attention_mask'].dtype
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.cat(
[common_inputs['attention_mask'], torch.ones(a_ , a_ , dtype=a_ )] , dim=1 )
SCREAMING_SNAKE_CASE__ : List[str] = [
(torch.zeros(a_ ), torch.zeros(a_ )) for _ in range(a_ )
]
return common_inputs
def __lowercase( self : Optional[int] , a_ : PreTrainedTokenizer , a_ : int = -1 , a_ : int = -1 , a_ : bool = False , a_ : Optional[TensorType] = None , )-> Mapping[str, Any]:
"""simple docstring"""
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE__ : Union[str, Any] = compute_effective_axis_dimension(
a_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.num_special_tokens_to_add(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = compute_effective_axis_dimension(
a_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=a_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE__ : Optional[Any] = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE__ : Dict = dict(tokenizer(a_ , return_tensors=a_ ) )
return common_inputs
def __lowercase( self : Optional[Any] , a_ : PreTrainedTokenizer , a_ : int = -1 , a_ : int = -1 , a_ : bool = False , a_ : Optional[TensorType] = None , )-> Mapping[str, Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ : Any = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
a_ , batch_size=a_ , seq_length=a_ , is_pair=a_ , framework=a_ )
else:
SCREAMING_SNAKE_CASE__ : str = self._generate_dummy_inputs_for_causal_lm(
a_ , batch_size=a_ , seq_length=a_ , is_pair=a_ , framework=a_ )
return common_inputs
def __lowercase( self : Any , a_ : Union[str, Any] , a_ : Optional[int] , a_ : Optional[Any] , a_ : int )-> Optional[int]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ : Any = super()._flatten_past_key_values_(a_ , a_ , a_ , a_ )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = super(a_ , self )._flatten_past_key_values_(
a_ , a_ , a_ , a_ )
@property
def __lowercase( self : Optional[Any] )-> float:
"""simple docstring"""
return 1e-4
| 636 | class snake_case ( UpperCamelCase_ ):
pass
class snake_case ( UpperCamelCase_ ):
pass
class snake_case :
def __init__( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = [
[],
[],
[],
]
def __lowercase( self : int , a_ : int , a_ : int )-> None:
"""simple docstring"""
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError('Maximum queue size is 100' )
self.queues[priority].append(a_ )
except IndexError:
raise ValueError('Valid priorities are 0, 1, and 2' )
def __lowercase( self : int )-> int:
"""simple docstring"""
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self : Any )-> str:
"""simple docstring"""
return "\n".join(F'''Priority {i}: {q}''' for i, q in enumerate(self.queues ) )
class snake_case :
def __init__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = []
def __lowercase( self : List[str] , a_ : int )-> None:
"""simple docstring"""
if len(self.queue ) == 100:
raise OverFlowError('Maximum queue size is 100' )
self.queue.append(a_ )
def __lowercase( self : int )-> int:
"""simple docstring"""
if not self.queue:
raise UnderFlowError('The queue is empty' )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = min(self.queue )
self.queue.remove(a_ )
return data
def __str__( self : List[str] )-> str:
"""simple docstring"""
return str(self.queue )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
print(lowercase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(lowercase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
print(lowercase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(lowercase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 636 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE__ : Tuple = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Dict = ["YolosFeatureExtractor"]
SCREAMING_SNAKE_CASE__ : str = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Tuple = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 636 | from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _a ( lowercase__ : List[str] ):
'''simple docstring'''
if not is_accelerate_available():
return method
SCREAMING_SNAKE_CASE__ : str = version.parse(accelerate.__version__ ).base_version
if version.parse(lowercase__ ) < version.parse('0.17.0' ):
return method
def wrapper(self : Optional[int] , *lowercase__ : int , **lowercase__ : Tuple ):
if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
self._hf_hook.pre_forward(self )
return method(self , *lowercase__ , **lowercase__ )
return wrapper
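# Hedged usage sketch (assuming the decorator above is the accelerate forward-hook
# wrapper, here given the illustrative name `apply_forward_hook`): it is meant to
# decorate methods such as `encode`/`decode` on a module that may be offloaded,
# so the hook can move weights onto the right device before the call runs.
#   class TinyAutoencoder(torch.nn.Module):
#       @apply_forward_hook
#       def encode(self, x):
#           return x * 0.5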
| 636 | 1 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class snake_case :
def __init__( self : Dict , a_ : str , a_ : Any=14 , a_ : int=7 , a_ : str=True , a_ : Dict=True , a_ : int=False , a_ : Dict=True , a_ : List[str]=99 , a_ : Union[str, Any]=32 , a_ : Optional[Any]=4 , a_ : List[Any]=4 , a_ : Union[str, Any]=4 , a_ : Dict=37 , a_ : List[Any]="gelu" , a_ : Optional[int]=0.1 , a_ : List[Any]=0.1 , a_ : Any=512 , a_ : Dict=0.02 , )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = parent
SCREAMING_SNAKE_CASE__ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = seq_length
SCREAMING_SNAKE_CASE__ : str = is_training
SCREAMING_SNAKE_CASE__ : Any = use_input_mask
SCREAMING_SNAKE_CASE__ : List[str] = use_token_type_ids
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rotary_dim
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : str = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : Tuple = hidden_act
SCREAMING_SNAKE_CASE__ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : List[Any] = vocab_size - 1
SCREAMING_SNAKE_CASE__ : Any = vocab_size - 1
SCREAMING_SNAKE_CASE__ : str = vocab_size - 1
def __lowercase( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : str = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : List[Any] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=a_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __lowercase( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = config_and_inputs
SCREAMING_SNAKE_CASE__ : str = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def __lowercase( self : Tuple , a_ : List[Any] , a_ : Any , a_ : Any , a_ : Optional[Any] )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = 20
SCREAMING_SNAKE_CASE__ : int = model_class_name(a_ )
SCREAMING_SNAKE_CASE__ : Dict = model.init_cache(input_ids.shape[0] , a_ )
SCREAMING_SNAKE_CASE__ : Tuple = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
SCREAMING_SNAKE_CASE__ : List[str] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(
input_ids[:, :-1] , attention_mask=a_ , past_key_values=a_ , position_ids=a_ , )
SCREAMING_SNAKE_CASE__ : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
SCREAMING_SNAKE_CASE__ : Tuple = model(
input_ids[:, -1:] , attention_mask=a_ , past_key_values=outputs_cache.past_key_values , position_ids=a_ , )
SCREAMING_SNAKE_CASE__ : List[str] = model(a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' )
def __lowercase( self : Tuple , a_ : Dict , a_ : str , a_ : Dict , a_ : List[Any] )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = 20
SCREAMING_SNAKE_CASE__ : Tuple = model_class_name(a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
SCREAMING_SNAKE_CASE__ : Tuple = model.init_cache(input_ids.shape[0] , a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE__ : str = model(
input_ids[:, :-1] , attention_mask=a_ , past_key_values=a_ , position_ids=a_ , )
SCREAMING_SNAKE_CASE__ : Any = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
SCREAMING_SNAKE_CASE__ : str = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=a_ , position_ids=a_ , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(a_ , attention_mask=a_ )
SCREAMING_SNAKE_CASE__ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowercase_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowercase_ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def __lowercase( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = FlaxGPTJModelTester(self )
def __lowercase( self : int )-> str:
"""simple docstring"""
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(a_ , a_ , a_ , a_ )
def __lowercase( self : Dict )-> str:
"""simple docstring"""
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
a_ , a_ , a_ , a_ )
@tooslow
def __lowercase( self : Any )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=a_ , truncation=a_ )
SCREAMING_SNAKE_CASE__ : Any = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
SCREAMING_SNAKE_CASE__ : List[Any] = False
SCREAMING_SNAKE_CASE__ : Any = model.config.eos_token_id
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jax.jit(model.generate )
SCREAMING_SNAKE_CASE__ : Any = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
SCREAMING_SNAKE_CASE__ : Any = tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(a_ , a_ )
@is_pt_flax_cross_test
def __lowercase( self : Optional[int] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE__ : Any = self._prepare_for_class(a_ , a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE__ : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE__ : List[str] = getattr(a_ , a_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = pt_inputs['input_ids'].shape
SCREAMING_SNAKE_CASE__ : List[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(a_ ):
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : str = 1
SCREAMING_SNAKE_CASE__ : Dict = 0
SCREAMING_SNAKE_CASE__ : Any = 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = pt_model_class(a_ ).eval()
SCREAMING_SNAKE_CASE__ : int = model_class(a_ , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = fx_state
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = pt_model(**a_ ).to_tuple()
SCREAMING_SNAKE_CASE__ : Dict = fx_model(**a_ ).to_tuple()
self.assertEqual(len(a_ ) , len(a_ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(a_ , a_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_class.from_pretrained(a_ , from_pt=a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = fx_model_loaded(**a_ ).to_tuple()
self.assertEqual(
len(a_ ) , len(a_ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(a_ , a_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def __lowercase( self : int )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE__ : List[Any] = self._prepare_for_class(a_ , a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE__ : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE__ : Optional[int] = getattr(a_ , a_ )
SCREAMING_SNAKE_CASE__ : Any = pt_model_class(a_ ).eval()
SCREAMING_SNAKE_CASE__ : str = model_class(a_ , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE__ : Dict = load_flax_weights_in_pytorch_model(a_ , fx_model.params )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = pt_inputs['input_ids'].shape
SCREAMING_SNAKE_CASE__ : List[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(a_ ):
SCREAMING_SNAKE_CASE__ : Tuple = 0
SCREAMING_SNAKE_CASE__ : Optional[int] = 1
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : List[Any] = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = pt_model(**a_ ).to_tuple()
SCREAMING_SNAKE_CASE__ : Any = fx_model(**a_ ).to_tuple()
self.assertEqual(len(a_ ) , len(a_ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(a_ , a_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(a_ )
SCREAMING_SNAKE_CASE__ : int = pt_model_class.from_pretrained(a_ , from_flax=a_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any = pt_model_loaded(**a_ ).to_tuple()
self.assertEqual(
len(a_ ) , len(a_ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(a_ , a_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def __lowercase( self : str )-> Union[str, Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[str] = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
SCREAMING_SNAKE_CASE__ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(a_ )
| 636 | import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _a ( lowercase__ : int ):
'''simple docstring'''
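    # torch.compile (torch >= 2.0) wraps modules in torch._dynamo's OptimizedModule;
    # anything else is reported as not compiled.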
if is_torch_version('<' , '2.0.0' ) or not hasattr(lowercase__ , '_dynamo' ):
return False
return isinstance(lowercase__ , torch._dynamo.eval_frame.OptimizedModule )
def _a ( lowercase__ : Optional[Any] , lowercase__ : bool = True ):
'''simple docstring'''
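    # Peel off DistributedDataParallel / DataParallel (and DeepSpeedEngine when DeepSpeed is
    # installed) wrappers, optionally restoring the original forward patched in for mixed
    # precision and undoing the transformer-engine conversion, then re-attach the
    # torch.compile wrapper if one was present.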
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
SCREAMING_SNAKE_CASE__ : Dict = is_compiled_module(lowercase__ )
if is_compiled:
SCREAMING_SNAKE_CASE__ : Tuple = model
SCREAMING_SNAKE_CASE__ : int = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE__ : Any = model.module
if not keep_fpaa_wrapper:
SCREAMING_SNAKE_CASE__ : List[Any] = getattr(lowercase__ , 'forward' )
SCREAMING_SNAKE_CASE__ : str = model.__dict__.pop('_original_forward' , lowercase__ )
if original_forward is not None:
while hasattr(lowercase__ , '__wrapped__' ):
SCREAMING_SNAKE_CASE__ : Dict = forward.__wrapped__
if forward == original_forward:
break
SCREAMING_SNAKE_CASE__ : Dict = forward
if getattr(lowercase__ , '_converted_to_transformer_engine' , lowercase__ ):
convert_model(lowercase__ , to_transformer_engine=lowercase__ )
if is_compiled:
SCREAMING_SNAKE_CASE__ : List[Any] = model
SCREAMING_SNAKE_CASE__ : Optional[Any] = compiled_model
return model
def _a ( ):
'''simple docstring'''
PartialState().wait_for_everyone()
def _a ( lowercase__ : str , lowercase__ : Optional[Any] ):
'''simple docstring'''
if PartialState().distributed_type == DistributedType.TPU:
xm.save(lowercase__ , lowercase__ )
elif PartialState().local_process_index == 0:
torch.save(lowercase__ , lowercase__ )
@contextmanager
def _a ( **lowercase__ : str ):
'''simple docstring'''
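    # Temporarily export each keyword argument as an upper-cased environment variable and
    # remove it again once the with-block exits.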
for key, value in kwargs.items():
SCREAMING_SNAKE_CASE__ : int = str(lowercase__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _a ( lowercase__ : Optional[Any] ):
'''simple docstring'''
if not hasattr(lowercase__ , '__qualname__' ) and not hasattr(lowercase__ , '__name__' ):
SCREAMING_SNAKE_CASE__ : Any = getattr(lowercase__ , '__class__' , lowercase__ )
if hasattr(lowercase__ , '__qualname__' ):
return obj.__qualname__
if hasattr(lowercase__ , '__name__' ):
return obj.__name__
return str(lowercase__ )
def _a ( lowercase__ : List[str] , lowercase__ : List[Any] ):
'''simple docstring'''
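    # Recursively merge `source` into `destination`, descending into nested dictionaries
    # instead of overwriting them, and return the updated destination.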
for key, value in source.items():
if isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE__ : List[str] = destination.setdefault(lowercase__ , {} )
merge_dicts(lowercase__ , lowercase__ )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = value
return destination
def _a ( lowercase__ : int = None ):
'''simple docstring'''
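    # Report whether something is already listening on the given port
    # (defaulting to 29500, the usual distributed-launcher port).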
if port is None:
SCREAMING_SNAKE_CASE__ : int = 2_95_00
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
| 636 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : str = {
"uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
"uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
"uclanlp/visualbert-vqa-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
"uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
"uclanlp/visualbert-vcr-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class snake_case ( UpperCamelCase_ ):
lowercase_ = 'visual_bert'
def __init__( self : Tuple , a_ : Optional[Any]=3_0522 , a_ : Any=768 , a_ : Tuple=512 , a_ : Any=12 , a_ : str=12 , a_ : Optional[int]=3072 , a_ : List[Any]="gelu" , a_ : Any=0.1 , a_ : int=0.1 , a_ : Any=512 , a_ : Union[str, Any]=2 , a_ : Union[str, Any]=0.02 , a_ : int=1e-1_2 , a_ : Optional[int]=False , a_ : Tuple=True , a_ : Optional[Any]=1 , a_ : List[str]=0 , a_ : Optional[int]=2 , **a_ : List[Any] , )-> Union[str, Any]:
"""simple docstring"""
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE__ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE__ : int = hidden_size
SCREAMING_SNAKE_CASE__ : List[str] = visual_embedding_dim
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : str = initializer_range
SCREAMING_SNAKE_CASE__ : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE__ : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Dict = bypass_transformer
SCREAMING_SNAKE_CASE__ : List[str] = special_visual_initialize
| 636 | from __future__ import annotations
def _a ( lowercase__ : list[int | float] , lowercase__ : int , lowercase__ : int ):
'''simple docstring'''
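    # Divide-and-conquer maximum over the inclusive range [left, right]: split at the midpoint,
    # recurse on both halves, and return the larger result. Negative indices follow Python
    # list semantics.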
if len(lowercase__ ) == 0:
raise ValueError('find_max() arg is an empty sequence' )
if (
left >= len(lowercase__ )
or left < -len(lowercase__ )
or right >= len(lowercase__ )
or right < -len(lowercase__ )
):
raise IndexError('list index out of range' )
if left == right:
return nums[left]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (left + right) >> 1 # the middle
SCREAMING_SNAKE_CASE__ : int = find_max(lowercase__ , lowercase__ , lowercase__ ) # find max in range[left, mid]
SCREAMING_SNAKE_CASE__ : Tuple = find_max(lowercase__ , mid + 1 , lowercase__ ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 636 | 1 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
lowercase_ = StableDiffusionDiffEditPipeline
lowercase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
lowercase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
lowercase_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase_ = frozenset([] )
def __lowercase( self : Optional[int] )-> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=A__ , )
SCREAMING_SNAKE_CASE__ : Any = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=A__ , set_alpha_to_one=A__ , )
SCREAMING_SNAKE_CASE__ : List[str] = DDIMInverseScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=A__ , set_alpha_to_zero=A__ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
SCREAMING_SNAKE_CASE__ : Dict = CLIPTextModel(A__ )
SCREAMING_SNAKE_CASE__ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE__ : Optional[int] = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowercase( self : int , a_ : List[Any] , a_ : Optional[int]=0 )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((1, 16, 16) , rng=random.Random(A__ ) ).to(A__ )
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(A__ ) ).to(A__ )
if str(A__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE__ : int = torch.manual_seed(A__ )
else:
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device=A__ ).manual_seed(A__ )
SCREAMING_SNAKE_CASE__ : List[str] = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowercase( self : Dict , a_ : str , a_ : List[str]=0 )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(A__ ) ).to(A__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = Image.fromarray(np.uinta(A__ ) ).convert('RGB' )
if str(A__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.manual_seed(A__ )
else:
SCREAMING_SNAKE_CASE__ : str = torch.Generator(device=A__ ).manual_seed(A__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowercase( self : Any , a_ : Tuple , a_ : int=0 )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(A__ ) ).to(A__ )
SCREAMING_SNAKE_CASE__ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ : str = Image.fromarray(np.uinta(A__ ) ).convert('RGB' )
if str(A__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.manual_seed(A__ )
else:
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device=A__ ).manual_seed(A__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def __lowercase( self : Union[str, Any] )-> str:
"""simple docstring"""
if not hasattr(self.pipeline_class , '_optional_components' ):
return
SCREAMING_SNAKE_CASE__ : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : str = self.pipeline_class(**A__ )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(A__ , A__ , A__ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs(A__ )
SCREAMING_SNAKE_CASE__ : Tuple = pipe(**A__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(A__ )
SCREAMING_SNAKE_CASE__ : List[str] = self.pipeline_class.from_pretrained(A__ )
pipe_loaded.to(A__ )
pipe_loaded.set_progress_bar_config(disable=A__ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(A__ , A__ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_inputs(A__ )
SCREAMING_SNAKE_CASE__ : List[Any] = pipe_loaded(**A__ )[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.abs(output - output_loaded ).max()
self.assertLess(A__ , 1e-4 )
def __lowercase( self : Dict )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'cpu'
SCREAMING_SNAKE_CASE__ : int = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : List[Any] = self.pipeline_class(**A__ )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_dummy_mask_inputs(A__ )
SCREAMING_SNAKE_CASE__ : Dict = pipe.generate_mask(**A__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
SCREAMING_SNAKE_CASE__ : List[str] = np.array([0] * 9 )
SCREAMING_SNAKE_CASE__ : List[str] = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A__ , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def __lowercase( self : Optional[int] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = 'cpu'
SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Tuple = self.pipeline_class(**A__ )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_dummy_inversion_inputs(A__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe.invert(**A__ ).images
SCREAMING_SNAKE_CASE__ : Any = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE__ : List[str] = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5_1050, 0.5015, 0.4407, 0.4799] , )
SCREAMING_SNAKE_CASE__ : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A__ , 1e-3 )
def __lowercase( self : Tuple )-> int:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def __lowercase( self : Any )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = 'cpu'
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Any = {'beta_start': 0.0_0085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'}
SCREAMING_SNAKE_CASE__ : Optional[Any] = DPMSolverMultistepScheduler(**A__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = DPMSolverMultistepInverseScheduler(**A__ )
SCREAMING_SNAKE_CASE__ : Tuple = self.pipeline_class(**A__ )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
SCREAMING_SNAKE_CASE__ : Dict = self.get_dummy_inversion_inputs(A__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = pipe.invert(**A__ ).images
SCREAMING_SNAKE_CASE__ : Dict = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5_1050, 0.5015, 0.4407, 0.4799] , )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A__ , 1e-3 )
@require_torch_gpu
@slow
class snake_case ( unittest.TestCase ):
def __lowercase( self : int )-> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def __lowercase( cls : Tuple )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
SCREAMING_SNAKE_CASE__ : List[str] = raw_image.convert('RGB' ).resize((768, 768) )
SCREAMING_SNAKE_CASE__ : str = raw_image
def __lowercase( self : Any )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Any = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=A__ , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Dict = DDIMScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE__ : int = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A__ )
SCREAMING_SNAKE_CASE__ : List[str] = 'a bowl of fruit'
SCREAMING_SNAKE_CASE__ : List[Any] = 'a bowl of pears'
SCREAMING_SNAKE_CASE__ : List[Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=A__ , target_prompt=A__ , generator=A__ , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.invert(
prompt=A__ , image=self.raw_image , inpaint_strength=0.7 , generator=A__ ).latents
SCREAMING_SNAKE_CASE__ : int = pipe(
prompt=A__ , mask_image=A__ , image_latents=A__ , generator=A__ , negative_prompt=A__ , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
SCREAMING_SNAKE_CASE__ : List[str] = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def __lowercase( self : List[str] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : int = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=A__ , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE__ : str = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A__ )
SCREAMING_SNAKE_CASE__ : List[str] = 'a bowl of fruit'
SCREAMING_SNAKE_CASE__ : int = 'a bowl of pears'
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=A__ , target_prompt=A__ , generator=A__ , )
SCREAMING_SNAKE_CASE__ : Dict = pipe.invert(
prompt=A__ , image=self.raw_image , inpaint_strength=0.7 , generator=A__ , num_inference_steps=25 , ).latents
SCREAMING_SNAKE_CASE__ : int = pipe(
prompt=A__ , mask_image=A__ , image_latents=A__ , generator=A__ , negative_prompt=A__ , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 700 | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def _a ( lowercase__ : Any ):
'''simple docstring'''
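    # Each process builds [rank * n + 1, ..., rank * n + n] on its own device, so gathering
    # across n processes yields the sequence 1 .. n**2.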
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def _a ( lowercase__ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = create_tensor(lowercase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = gather(lowercase__ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def _a ( lowercase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = [state.process_index]
SCREAMING_SNAKE_CASE__ : Any = gather_object(lowercase__ )
assert len(lowercase__ ) == state.num_processes, f'''{gathered_obj}, {len(lowercase__ )} != {state.num_processes}'''
assert gathered_obj == list(range(state.num_processes ) ), f'''{gathered_obj} != {list(range(state.num_processes ) )}'''
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = create_tensor(lowercase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = broadcast(lowercase__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def _a ( lowercase__ : int ):
'''simple docstring'''
if state.is_main_process:
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.arange(state.num_processes + 1 ).to(state.device )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = torch.arange(state.num_processes ).to(state.device )
SCREAMING_SNAKE_CASE__ : Any = pad_across_processes(lowercase__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def _a ( lowercase__ : Optional[Any] ):
'''simple docstring'''
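    # Only meaningful for exactly two processes: their tensors are [1, 2] and [3, 4],
    # so the element-wise sum reduction should give [4, 6].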
if state.num_processes != 2:
return
SCREAMING_SNAKE_CASE__ : List[Any] = create_tensor(lowercase__ )
SCREAMING_SNAKE_CASE__ : str = reduce(lowercase__ , 'sum' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), f'''{reduced_tensor} != {truth_tensor}'''
def _a ( lowercase__ : int ):
'''simple docstring'''
if state.num_processes != 2:
return
SCREAMING_SNAKE_CASE__ : Any = create_tensor(lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = reduce(lowercase__ , 'mean' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), f'''{reduced_tensor} != {truth_tensor}'''
def _a ( lowercase__ : int ):
'''simple docstring'''
main()
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = PartialState()
state.print(f'''State: {state}''' )
state.print('testing gather' )
test_gather(lowercase__ )
state.print('testing gather_object' )
test_gather_object(lowercase__ )
state.print('testing broadcast' )
test_broadcast(lowercase__ )
state.print('testing pad_across_processes' )
test_pad_across_processes(lowercase__ )
state.print('testing reduce_sum' )
test_reduce_sum(lowercase__ )
state.print('testing reduce_mean' )
test_reduce_mean(lowercase__ )
if __name__ == "__main__":
main()
| 636 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class snake_case ( unittest.TestCase ):
def __lowercase( self : List[Any] )-> Optional[int]:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowercase( self : Tuple )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = 1
SCREAMING_SNAKE_CASE__ : Optional[int] = 3
SCREAMING_SNAKE_CASE__ : Dict = (32, 32)
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
return image
@property
def __lowercase( self : List[str] )-> Any:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def __lowercase( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __lowercase( self : Dict )-> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(lowerCAmelCase__ )
@property
def __lowercase( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
def extract(*a_ : Any , **a_ : List[Any] ):
class snake_case :
def __init__( self : List[Any] )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = torch.ones([0] )
def __lowercase( self : Union[str, Any] , a_ : Optional[int] )-> str:
"""simple docstring"""
self.pixel_values.to(lowerCAmelCase__ )
return self
return Out()
return extract
def __lowercase( self : Any )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : str = self.dummy_cond_unet
SCREAMING_SNAKE_CASE__ : int = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_vae
SCREAMING_SNAKE_CASE__ : str = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        # assemble the pipeline around the DDIM scheduler configured above
SCREAMING_SNAKE_CASE__ : int = StableDiffusionPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE__ : str = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE__ : int = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE__ : str = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = sd_pipe([prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
SCREAMING_SNAKE_CASE__ : int = output.images
SCREAMING_SNAKE_CASE__ : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple = sd_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCAmelCase__ , )[0]
SCREAMING_SNAKE_CASE__ : str = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : List[Any] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __lowercase( self : List[str] )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_cond_unet
SCREAMING_SNAKE_CASE__ : List[str] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_vae
SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE__ : Tuple = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE__ : str = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sd_pipe([prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output.images
SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = sd_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCAmelCase__ , )[0]
SCREAMING_SNAKE_CASE__ : int = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : Tuple = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __lowercase( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=lowerCAmelCase__ )
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert isinstance(pipe.scheduler , lowerCAmelCase__ )
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE__ : List[str] = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = StableDiffusionPipeline.from_pretrained(lowerCAmelCase__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE__ : Tuple = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __lowercase( self : List[str] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.dummy_cond_unet
SCREAMING_SNAKE_CASE__ : Any = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.dummy_vae
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# put models in fp16
SCREAMING_SNAKE_CASE__ : List[Any] = unet.half()
SCREAMING_SNAKE_CASE__ : int = vae.half()
SCREAMING_SNAKE_CASE__ : Optional[Any] = bert.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE__ : str = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE__ : List[Any] = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def __lowercase( self : int )-> int:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase( self : str )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
SCREAMING_SNAKE_CASE__ : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
SCREAMING_SNAKE_CASE__ : str = 40_0366_0346
SCREAMING_SNAKE_CASE__ : Optional[int] = 7
# without safety guidance (sld_guidance_scale = 0)
SCREAMING_SNAKE_CASE__ : Dict = torch.manual_seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = sd_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE__ : Dict = output.images
SCREAMING_SNAKE_CASE__ : Tuple = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance enabled (strong configuration)
SCREAMING_SNAKE_CASE__ : Tuple = torch.manual_seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output.images
SCREAMING_SNAKE_CASE__ : Dict = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : int = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowercase( self : List[str] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE__ : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
SCREAMING_SNAKE_CASE__ : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE__ : str = 'padme amidala taking a bath artwork, safe for work, no nudity'
SCREAMING_SNAKE_CASE__ : List[Any] = 27_3497_1755
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 7
SCREAMING_SNAKE_CASE__ : Dict = torch.manual_seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple = sd_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output.images
SCREAMING_SNAKE_CASE__ : Optional[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : str = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.manual_seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE__ : Tuple = output.images
SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Dict = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowercase( self : Optional[Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE__ : int = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
SCREAMING_SNAKE_CASE__ : Any = 10_4435_5234
SCREAMING_SNAKE_CASE__ : Any = 12
SCREAMING_SNAKE_CASE__ : Any = torch.manual_seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sd_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE__ : int = output.images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : int = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
SCREAMING_SNAKE_CASE__ : int = torch.manual_seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = sd_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE__ : Optional[int] = output.images
SCREAMING_SNAKE_CASE__ : Optional[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 701 | import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
SCREAMING_SNAKE_CASE__ : Any = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class snake_case ( unittest.TestCase ):
def __init__( self : List[Any] , a_ : Optional[int] , a_ : Dict=7 , a_ : Any=3 , a_ : Any=18 , a_ : int=30 , a_ : int=400 , a_ : List[Any]=None , a_ : int=True , a_ : int=True , a_ : Dict=None , )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else {'height': 20, 'width': 20}
SCREAMING_SNAKE_CASE__ : str = parent
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : Any = num_channels
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_size
SCREAMING_SNAKE_CASE__ : List[str] = min_resolution
SCREAMING_SNAKE_CASE__ : Dict = max_resolution
SCREAMING_SNAKE_CASE__ : List[Any] = size
SCREAMING_SNAKE_CASE__ : Tuple = do_normalize
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_convert_rgb
SCREAMING_SNAKE_CASE__ : List[str] = [512, 1024, 2048, 4096]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = patch_size if patch_size is not None else {'height': 16, 'width': 16}
def __lowercase( self : Optional[Any] )-> str:
"""simple docstring"""
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __lowercase( self : Dict )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(a_ , stream=a_ ).raw ).convert('RGB' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = PixaStructImageProcessor if is_vision_available() else None
def __lowercase( self : List[str] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = PixaStructImageProcessingTester(self )
@property
def __lowercase( self : Dict )-> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , 'do_normalize' ) )
self.assertTrue(hasattr(a_ , 'do_convert_rgb' ) )
def __lowercase( self : List[Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.image_processor_tester.prepare_dummy_image()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE__ : List[Any] = 2048
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(a_ , return_tensors='pt' , max_patches=a_ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : str = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
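        # Each flattened patch is prefixed with its row and column index, hence the extra 2
        # values per patch on top of the height * width * channels pixel values.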
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processor(
a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowercase( self : Any )-> Any:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : str = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
SCREAMING_SNAKE_CASE__ : int = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(a_ ):
SCREAMING_SNAKE_CASE__ : Dict = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
SCREAMING_SNAKE_CASE__ : List[Any] = 'Hello'
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ , header_text=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processor(
a_ , return_tensors='pt' , max_patches=a_ , header_text=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowercase( self : List[Any] )-> Dict:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
SCREAMING_SNAKE_CASE__ : str = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE__ : str = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processor(
a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Any = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(
a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = PixaStructImageProcessor if is_vision_available() else None
def __lowercase( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = PixaStructImageProcessingTester(self , num_channels=4 )
SCREAMING_SNAKE_CASE__ : Dict = 3
@property
def __lowercase( self : Any )-> Any:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase( self : Dict )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , 'do_normalize' ) )
self.assertTrue(hasattr(a_ , 'do_convert_rgb' ) )
def __lowercase( self : str )-> Union[str, Any]:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Dict = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processor(
a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 636 | 0 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ : Any = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class snake_case ( snake_case__ , unittest.TestCase ):
lowercase_ = PegasusTokenizer
lowercase_ = PegasusTokenizerFast
lowercase_ = True
lowercase_ = True
def __lowercase( self : Optional[int] )-> int:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ : str = PegasusTokenizer(UpperCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowercase( self : Tuple )-> Dict:
"""simple docstring"""
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def __lowercase( self : int , **a_ : List[str] )-> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def __lowercase( self : Optional[Any] , a_ : Dict )-> List[str]:
"""simple docstring"""
return ("This is a test", "This is a test")
def __lowercase( self : Optional[Any] )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = '</s>'
SCREAMING_SNAKE_CASE__ : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def __lowercase( self : Any )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
self.assertEqual(len(UpperCAmelCase_ ) , 1103 )
def __lowercase( self : int )-> List[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def __lowercase( self : int )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : List[Any] = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
SCREAMING_SNAKE_CASE__ : int = rust_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
SCREAMING_SNAKE_CASE__ : Any = py_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def __lowercase( self : Any )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
SCREAMING_SNAKE_CASE__ : str = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
SCREAMING_SNAKE_CASE__ : Any = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def __lowercase( self : Any )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
SCREAMING_SNAKE_CASE__ : List[str] = 'To ensure a smooth flow of bank resolutions.'
SCREAMING_SNAKE_CASE__ : int = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
SCREAMING_SNAKE_CASE__ : int = tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def __lowercase( self : Optional[Any] )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = ['This is going to be way too long.' * 150, 'short example']
SCREAMING_SNAKE_CASE__ : Dict = ['not super long but more than 5 tokens', 'tiny']
SCREAMING_SNAKE_CASE__ : Dict = self._large_tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : List[str] = self._large_tokenizer(
text_target=UpperCAmelCase_ , max_length=5 , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCAmelCase_ ) == 2 # input_ids, attention_mask.
@slow
def __lowercase( self : str )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class snake_case ( snake_case__ , unittest.TestCase ):
lowercase_ = PegasusTokenizer
lowercase_ = PegasusTokenizerFast
lowercase_ = True
lowercase_ = True
def __lowercase( self : Any )-> List[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ : int = PegasusTokenizer(UpperCAmelCase_ , offset=0 , mask_token_sent=UpperCAmelCase_ , mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowercase( self : str )-> Dict:
"""simple docstring"""
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def __lowercase( self : List[str] , **a_ : Any )-> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def __lowercase( self : Any , a_ : Union[str, Any] )-> str:
"""simple docstring"""
return ("This is a test", "This is a test")
def __lowercase( self : Optional[Any] )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : str = self.tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
SCREAMING_SNAKE_CASE__ : List[str] = rust_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
SCREAMING_SNAKE_CASE__ : List[Any] = py_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@require_torch
def __lowercase( self : Any )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = ['This is going to be way too long.' * 1000, 'short example']
SCREAMING_SNAKE_CASE__ : Optional[Any] = ['not super long but more than 5 tokens', 'tiny']
SCREAMING_SNAKE_CASE__ : List[Any] = self._large_tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : Optional[int] = self._large_tokenizer(
text_target=UpperCAmelCase_ , max_length=5 , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCAmelCase_ ) == 2 # input_ids, attention_mask.
def __lowercase( self : Tuple )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer(UpperCAmelCase_ ).input_ids
self.assertListEqual(
UpperCAmelCase_ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 702 | import heapq as hq
import math
from collections.abc import Iterator
class snake_case :
def __init__( self : str , a_ : str )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = str(id_ )
SCREAMING_SNAKE_CASE__ : Any = None
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {} # {vertex:distance}
def __lt__( self : int , a_ : Tuple )-> Union[str, Any]:
"""simple docstring"""
return self.key < other.key
def __repr__( self : Any )-> Dict:
"""simple docstring"""
return self.id
def __lowercase( self : Optional[Any] , a_ : int )-> List[str]:
"""simple docstring"""
self.neighbors.append(a_ )
def __lowercase( self : int , a_ : int , a_ : Optional[Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = weight
def _a ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : Dict ):
'''simple docstring'''
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , lowercase__ )
graph[b - 1].add_edge(graph[a - 1] , lowercase__ )
def _a ( lowercase__ : list , lowercase__ : Vertex ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = []
for u in graph:
SCREAMING_SNAKE_CASE__ : Dict = math.inf
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : List[str] = 0
SCREAMING_SNAKE_CASE__ : int = graph[:]
while q:
SCREAMING_SNAKE_CASE__ : Optional[Any] = min(lowercase__ )
q.remove(lowercase__ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
SCREAMING_SNAKE_CASE__ : int = u
SCREAMING_SNAKE_CASE__ : Any = u.edges[v.id]
for i in range(1 , len(lowercase__ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def _a ( lowercase__ : list , lowercase__ : Vertex ):
'''simple docstring'''
for u in graph:
SCREAMING_SNAKE_CASE__ : List[str] = math.inf
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
SCREAMING_SNAKE_CASE__ : Tuple = list(lowercase__ )
hq.heapify(lowercase__ )
while h:
SCREAMING_SNAKE_CASE__ : Optional[int] = hq.heappop(lowercase__ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
SCREAMING_SNAKE_CASE__ : List[str] = u
SCREAMING_SNAKE_CASE__ : Dict = u.edges[v.id]
hq.heapify(lowercase__ )
for i in range(1 , len(lowercase__ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def _a ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
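# The two helpers above are the list-based and heap-based variants of Prim's minimum
# spanning tree algorithm over the Vertex class. Because the machine-rewritten variable
# names obscure the flow, the sketch below restates the same idea on a plain adjacency
# dict; all names here (mst_prim, the example graph) are illustrative only.
import heapq
def mst_prim(adj: dict, start) -> list:
    """Return MST edges as (u, v, weight) tuples for a connected, undirected graph."""
    visited = {start}
    frontier = [(w, start, v) for w, v in adj[start]]
    heapq.heapify(frontier)
    mst = []
    while frontier and len(visited) < len(adj):
        w, u, v = heapq.heappop(frontier)  # cheapest edge leaving the current tree
        if v in visited:
            continue
        visited.add(v)
        mst.append((u, v, w))
        for nw, nv in adj[v]:
            if nv not in visited:
                heapq.heappush(frontier, (nw, v, nv))
    return mst
# triangle graph: edges 1-2 (w=1), 2-3 (w=2), 1-3 (w=3) -> the MST keeps weights 1 and 2
_example = {1: [(1, 2), (3, 3)], 2: [(1, 1), (2, 3)], 3: [(2, 2), (3, 1)]}
assert sorted(w for _, _, w in mst_prim(_example, 1)) == [1, 2]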
| 636 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Any = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class snake_case ( _SCREAMING_SNAKE_CASE ):
lowercase_ = "perceiver"
def __init__( self : List[Any] , a_ : Tuple=256 , a_ : List[str]=1280 , a_ : List[Any]=768 , a_ : List[str]=1 , a_ : str=26 , a_ : Tuple=8 , a_ : Any=8 , a_ : str=None , a_ : Dict=None , a_ : int="kv" , a_ : int=1 , a_ : Dict=1 , a_ : str="gelu" , a_ : Any=0.1 , a_ : Any=0.02 , a_ : Optional[int]=1e-1_2 , a_ : List[str]=True , a_ : Dict=262 , a_ : Any=2048 , a_ : int=56 , a_ : List[str]=[368, 496] , a_ : List[Any]=16 , a_ : int=1920 , a_ : List[Any]=16 , a_ : Optional[int]=[1, 16, 224, 224] , **a_ : Union[str, Any] , )-> Any:
"""simple docstring"""
super().__init__(**A_ )
SCREAMING_SNAKE_CASE__ : Tuple = num_latents
SCREAMING_SNAKE_CASE__ : int = d_latents
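        # In the Perceiver architecture, `num_latents` x `d_latents` describes the small
        # latent array that repeatedly cross-attends to the raw inputs, which is what
        # keeps compute largely independent of the input size.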
SCREAMING_SNAKE_CASE__ : List[str] = d_model
SCREAMING_SNAKE_CASE__ : str = num_blocks
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_self_attends_per_block
SCREAMING_SNAKE_CASE__ : Tuple = num_self_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = num_cross_attention_heads
SCREAMING_SNAKE_CASE__ : int = qk_channels
SCREAMING_SNAKE_CASE__ : str = v_channels
SCREAMING_SNAKE_CASE__ : Tuple = cross_attention_shape_for_attention
SCREAMING_SNAKE_CASE__ : Optional[int] = self_attention_widening_factor
SCREAMING_SNAKE_CASE__ : List[Any] = cross_attention_widening_factor
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE__ : Tuple = layer_norm_eps
SCREAMING_SNAKE_CASE__ : int = use_query_residual
# masked language modeling attributes
SCREAMING_SNAKE_CASE__ : Dict = vocab_size
SCREAMING_SNAKE_CASE__ : int = max_position_embeddings
# image classification attributes
SCREAMING_SNAKE_CASE__ : Tuple = image_size
# flow attributes
SCREAMING_SNAKE_CASE__ : Optional[int] = train_size
# multimodal autoencoding attributes
SCREAMING_SNAKE_CASE__ : List[str] = num_frames
SCREAMING_SNAKE_CASE__ : int = audio_samples_per_frame
SCREAMING_SNAKE_CASE__ : str = samples_per_patch
SCREAMING_SNAKE_CASE__ : Any = output_shape
class snake_case ( _SCREAMING_SNAKE_CASE ):
@property
def __lowercase( self : List[str] )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : int = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE__ : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('inputs', dynamic_axis),
('attention_mask', dynamic_axis),
] )
@property
def __lowercase( self : Dict )-> float:
"""simple docstring"""
return 1e-4
def __lowercase( self : Tuple , a_ : List[Any] , a_ : List[Any] = -1 , a_ : Any = -1 , a_ : Dict = -1 , a_ : Optional[int] = False , a_ : Optional[Any] = None , a_ : Tuple = 3 , a_ : Optional[Any] = 40 , a_ : Any = 40 , )-> Mapping[str, Any]:
"""simple docstring"""
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(A_ , A_ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE__ : int = compute_effective_axis_dimension(
A_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE__ : str = preprocessor.num_special_tokens_to_add(A_ )
SCREAMING_SNAKE_CASE__ : List[Any] = compute_effective_axis_dimension(
A_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE__ : Optional[int] = [' '.join(['a'] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE__ : Any = dict(preprocessor(A_ , return_tensors=A_ ) )
SCREAMING_SNAKE_CASE__ : List[Any] = inputs.pop('input_ids' )
return inputs
elif isinstance(A_ , A_ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE__ : List[str] = compute_effective_axis_dimension(A_ , fixed_dimension=OnnxConfig.default_fixed_batch )
SCREAMING_SNAKE_CASE__ : Any = self._generate_dummy_images(A_ , A_ , A_ , A_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dict(preprocessor(images=A_ , return_tensors=A_ ) )
SCREAMING_SNAKE_CASE__ : List[Any] = inputs.pop('pixel_values' )
return inputs
else:
raise ValueError(
'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
| 703 | def _a ( lowercase__ : int , lowercase__ : int ):
'''simple docstring'''
return int((input_a, input_a).count(0 ) != 0 )
def _a ( ):
'''simple docstring'''
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
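# The intended trick above (counting zeros in the input pair) is equivalent to the more
# direct boolean form NOT (a AND b); a minimal cross-check with illustrative names only:
def nand_gate_bool(input_a: int, input_b: int) -> int:
    return int(not (input_a and input_b))
assert [nand_gate_bool(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [1, 1, 1, 0]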
| 636 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Optional[int] = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class snake_case ( UpperCamelCase_ ):
lowercase_ = 'gpt_bigcode'
lowercase_ = ['past_key_values']
lowercase_ = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : int , a_ : List[str]=5_0257 , a_ : Optional[int]=1024 , a_ : Union[str, Any]=768 , a_ : Optional[int]=12 , a_ : Tuple=12 , a_ : str=None , a_ : int="gelu_pytorch_tanh" , a_ : List[str]=0.1 , a_ : Union[str, Any]=0.1 , a_ : Tuple=0.1 , a_ : List[Any]=1e-5 , a_ : Dict=0.02 , a_ : int=True , a_ : Optional[int]=True , a_ : str=5_0256 , a_ : Tuple=5_0256 , a_ : Dict=True , a_ : Optional[int]=True , a_ : int=True , **a_ : str , )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] = n_positions
SCREAMING_SNAKE_CASE__ : Any = n_embd
SCREAMING_SNAKE_CASE__ : Union[str, Any] = n_layer
SCREAMING_SNAKE_CASE__ : Optional[Any] = n_head
SCREAMING_SNAKE_CASE__ : Dict = n_inner
SCREAMING_SNAKE_CASE__ : Dict = activation_function
SCREAMING_SNAKE_CASE__ : Union[str, Any] = resid_pdrop
SCREAMING_SNAKE_CASE__ : Union[str, Any] = embd_pdrop
SCREAMING_SNAKE_CASE__ : str = attn_pdrop
SCREAMING_SNAKE_CASE__ : Optional[int] = layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : Any = scale_attn_weights
SCREAMING_SNAKE_CASE__ : Any = use_cache
SCREAMING_SNAKE_CASE__ : int = attention_softmax_in_fpaa
SCREAMING_SNAKE_CASE__ : Optional[Any] = scale_attention_softmax_in_fpaa
SCREAMING_SNAKE_CASE__ : str = multi_query
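        # `multi_query=True` selects multi-query attention (all heads share a single
        # key/value head), the main architectural difference of GPT-BigCode from GPT-2.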
SCREAMING_SNAKE_CASE__ : List[Any] = bos_token_id
SCREAMING_SNAKE_CASE__ : Optional[int] = eos_token_id
super().__init__(bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
| 704 | from math import factorial, radians
def _a ( lowercase__ : float , lowercase__ : int = 18 , lowercase__ : int = 10 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
SCREAMING_SNAKE_CASE__ : int = radians(lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = angle_in_radians
SCREAMING_SNAKE_CASE__ : Optional[int] = 3
SCREAMING_SNAKE_CASE__ : Optional[int] = -1
for _ in range(lowercase__ ):
result += (b * (angle_in_radians**a)) / factorial(lowercase__ )
SCREAMING_SNAKE_CASE__ : Any = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowercase__ , lowercase__ )
if __name__ == "__main__":
__import__("doctest").testmod()
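# The helper above reduces the angle modulo 360 degrees, converts it to radians, and
# sums the Maclaurin series of the sine function, sin(x) = x - x^3/3! + x^5/5! - ...
# Because the rewritten parameter names obscure this, the independent sketch below
# (illustrative names only) restates the series and checks it against math.sin.
from math import factorial, radians, sin
def maclaurin_sin(angle_in_degrees: float, terms: int = 18) -> float:
    x = radians(angle_in_degrees % 360.0)
    result, sign = x, -1.0
    for exponent in range(3, 2 * terms + 3, 2):
        result += sign * x**exponent / factorial(exponent)
        sign = -sign
    return result
assert abs(maclaurin_sin(30) - sin(radians(30))) < 1e-9
assert abs(maclaurin_sin(90) - 1.0) < 1e-9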
| 636 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
SCREAMING_SNAKE_CASE__ : List[str] = "pt"
elif is_tf_available():
SCREAMING_SNAKE_CASE__ : str = "tf"
else:
SCREAMING_SNAKE_CASE__ : str = "jax"
class snake_case ( lowercase__ , unittest.TestCase ):
lowercase_ = PerceiverTokenizer
lowercase_ = False
def __lowercase( self : List[Any] )-> int:
"""simple docstring"""
super().setUp()
__A : Tuple = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowercase( self : List[str] )-> List[str]:
"""simple docstring"""
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def __lowercase( self : Union[str, Any] , **a_ : str )-> str:
"""simple docstring"""
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __lowercase( self : Any , a_ : Union[str, Any] , a_ : Dict=False , a_ : int=20 , a_ : Optional[Any]=5 )-> Any:
"""simple docstring"""
__A : Tuple = []
for i in range(len(__lowerCamelCase ) ):
try:
__A : Any = tokenizer.decode([i] , clean_up_tokenization_spaces=__lowerCamelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__A : Union[str, Any] = list(filter(lambda a_ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , __lowerCamelCase ) )
__A : List[str] = list(filter(lambda a_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__lowerCamelCase ) , __lowerCamelCase ) )
if max_length is not None and len(__lowerCamelCase ) > max_length:
__A : Union[str, Any] = toks[:max_length]
if min_length is not None and len(__lowerCamelCase ) < min_length and len(__lowerCamelCase ) > 0:
while len(__lowerCamelCase ) < min_length:
__A : Dict = toks + toks
# toks_str = [t[1] for t in toks]
__A : Tuple = [t[0] for t in toks]
# Ensure consistency
__A : Optional[int] = tokenizer.decode(__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase )
if " " not in output_txt and len(__lowerCamelCase ) > 1:
__A : Any = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowerCamelCase )
+ " "
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowerCamelCase )
)
if with_prefix_space:
__A : Union[str, Any] = " " + output_txt
__A : List[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
return output_txt, output_ids
def __lowercase( self : Optional[int] )-> int:
"""simple docstring"""
__A : Optional[int] = self.perceiver_tokenizer
__A : Optional[Any] = "Unicode €."
__A : Dict = tokenizer(__lowerCamelCase )
__A : List[str] = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
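        # These ids are the UTF-8 bytes of the text shifted by 6 (the number of reserved
        # special tokens), wrapped in [CLS] (id 4) and [SEP] (id 5); e.g. "U" is byte 85
        # -> 91, and "€" is the three bytes 226/130/172 -> 232/136/178.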
self.assertEqual(encoded['input_ids'] , __lowerCamelCase )
# decoding
__A : Any = tokenizer.decode(__lowerCamelCase )
self.assertEqual(__lowerCamelCase , '[CLS]Unicode €.[SEP]' )
__A : int = tokenizer('e è é ê ë' )
__A : List[Any] = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , __lowerCamelCase )
# decoding
__A : str = tokenizer.decode(__lowerCamelCase )
self.assertEqual(__lowerCamelCase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def __lowercase( self : Dict )-> Union[str, Any]:
"""simple docstring"""
__A : Union[str, Any] = self.perceiver_tokenizer
__A : Optional[int] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
__A : Any = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__A : str = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
if FRAMEWORK != "jax":
__A : Union[str, Any] = list(batch.input_ids.numpy()[0] )
else:
__A : int = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def __lowercase( self : List[str] )-> List[str]:
"""simple docstring"""
__A : Union[str, Any] = self.perceiver_tokenizer
__A : Optional[int] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
__A : List[Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , __lowerCamelCase )
self.assertIn('attention_mask' , __lowerCamelCase )
self.assertNotIn('decoder_input_ids' , __lowerCamelCase )
self.assertNotIn('decoder_attention_mask' , __lowerCamelCase )
def __lowercase( self : Tuple )-> str:
"""simple docstring"""
__A : Tuple = self.perceiver_tokenizer
__A : Tuple = [
"Summary of the text.",
"Another summary.",
]
__A : List[Any] = tokenizer(
text_target=__lowerCamelCase , max_length=32 , padding='max_length' , truncation=__lowerCamelCase , return_tensors=__lowerCamelCase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __lowercase( self : Dict )-> List[str]:
"""simple docstring"""
__A : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__A : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__A : int = tempfile.mkdtemp()
__A : List[Any] = " He is very happy, UNwant\u00E9d,running"
__A : Any = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
__A : Optional[int] = tokenizer.__class__.from_pretrained(__lowerCamelCase )
__A : str = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
__A : int = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__A : str = tempfile.mkdtemp()
__A : Optional[Any] = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(['bim', 'bambam'] )
__A : List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__A : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
__A : Union[str, Any] = tokenizer.__class__.from_pretrained(__lowerCamelCase )
__A : Union[str, Any] = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__A : Any = tokenizer.__class__.from_pretrained(__lowerCamelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__lowerCamelCase )
def __lowercase( self : Optional[int] )-> Any:
"""simple docstring"""
__A : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__A : Union[str, Any] = json.load(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__A : List[str] = json.load(__lowerCamelCase )
__A : int = [F'''<extra_id_{i}>''' for i in range(125 )]
__A : List[Any] = added_tokens_extra_ids + [
"an_additional_special_token"
]
__A : List[str] = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(__lowerCamelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
with open(os.path.join(__lowerCamelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__A : List[Any] = tokenizer_class.from_pretrained(
__lowerCamelCase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__A : int = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=__lowerCamelCase )]
__A : Any = tokenizer_class.from_pretrained(
__lowerCamelCase , additional_special_tokens=__lowerCamelCase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __lowercase( self : int )-> Optional[Any]:
"""simple docstring"""
__A : Optional[Any] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def __lowercase( self : str )-> Tuple:
"""simple docstring"""
pass
def __lowercase( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
pass
def __lowercase( self : int )-> int:
"""simple docstring"""
pass
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
pass
def __lowercase( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
__A : Optional[int] = self.get_tokenizers(fast=__lowerCamelCase , do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__A : Union[str, Any] = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
__A : str = tokenizer.convert_tokens_to_string(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
| 705 | import math
def _a ( lowercase__ : int ):
'''simple docstring'''
assert isinstance(lowercase__ , lowercase__ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
SCREAMING_SNAKE_CASE__ : Tuple = range(3 , int(math.sqrt(lowercase__ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def _a ( lowercase__ : List[str] , lowercase__ : Any=1 , **lowercase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = factor * value
SCREAMING_SNAKE_CASE__ : Dict = value
while not is_prime(lowercase__ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **lowercase__ )
return value
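# The second helper above appears intended to scan upward from factor * value until a
# prime is found (the rewritten parameter names hide this). A self-contained restatement
# with illustrative names, plus two small checks:
def is_prime_sketch(n: int) -> bool:
    return n >= 2 and all(n % i for i in range(2, int(n**0.5) + 1))
def next_prime_sketch(value: int, factor: int = 1) -> int:
    candidate = factor * value
    while not is_prime_sketch(candidate):
        candidate += 1
    return candidate
assert next_prime_sketch(8) == 11            # 8, 9 and 10 are composite
assert next_prime_sketch(4, factor=3) == 13  # starts scanning upward from 12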
| 636 | 0 |
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
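# A minimal sketch of the replacement usage suggested by the warning above. The model id,
# prompt and file path are illustrative, and running it requires downloading the weights:
#
#     from diffusers import StableDiffusionImg2ImgPipeline
#     from PIL import Image
#
#     pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     init_image = Image.open("input.png").convert("RGB")
#     result = pipe(prompt="a fantasy landscape", image=init_image, strength=0.75).images[0]
#     result.save("output.png")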
| 706 | import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class snake_case :
def __init__( self : str , a_ : List[str] , a_ : Tuple=13 , a_ : Dict=30 , a_ : Optional[int]=2 , a_ : Tuple=3 , a_ : Dict=True , a_ : int=True , a_ : Optional[Any]=32 , a_ : List[str]=5 , a_ : Any=4 , a_ : Dict=37 , a_ : Dict="gelu" , a_ : int=0.1 , a_ : Optional[Any]=0.1 , a_ : Any=10 , a_ : List[str]=0.02 , a_ : Any=3 , a_ : List[str]=None , a_ : Optional[int]=2 , )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = parent
SCREAMING_SNAKE_CASE__ : int = batch_size
SCREAMING_SNAKE_CASE__ : int = image_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = num_channels
SCREAMING_SNAKE_CASE__ : int = is_training
SCREAMING_SNAKE_CASE__ : List[Any] = use_labels
SCREAMING_SNAKE_CASE__ : str = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : str = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = scope
SCREAMING_SNAKE_CASE__ : str = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
SCREAMING_SNAKE_CASE__ : Optional[int] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_patches + 2
def __lowercase( self : Optional[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __lowercase( self : Optional[Any] )-> Tuple:
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowercase( self : List[str] , a_ : List[str] , a_ : Optional[Any] , a_ : str )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = DeiTModel(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase( self : List[Any] , a_ : List[str] , a_ : List[str] , a_ : List[Any] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = DeiTForMaskedImageModeling(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ : Optional[int] = 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DeiTForMaskedImageModeling(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : int = model(a_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowercase( self : List[str] , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Tuple )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Tuple = DeiTForImageClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ : Any = 1
SCREAMING_SNAKE_CASE__ : int = DeiTForImageClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowercase( self : int )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = config_and_inputs  # config, pixel_values, labels
SCREAMING_SNAKE_CASE__ : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowercase_ = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowercase_ = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
def __lowercase( self : List[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = DeiTModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def __lowercase( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def __lowercase( self : List[Any] )-> Dict:
"""simple docstring"""
pass
def __lowercase( self : str )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , nn.Linear ) )
def __lowercase( self : str )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[str] = model_class(a_ )
SCREAMING_SNAKE_CASE__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : int = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a_ )
def __lowercase( self : List[Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def __lowercase( self : List[Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a_ )
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
def __lowercase( self : str , a_ : str , a_ : Tuple , a_ : Union[str, Any]=False )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __lowercase( self : Optional[Any] )-> Any:
"""simple docstring"""
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(a_ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE__ : Tuple = model_class(a_ )
model.to(a_ )
model.train()
SCREAMING_SNAKE_CASE__ : List[Any] = self._prepare_for_class(a_ , a_ , return_labels=a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**a_ ).loss
loss.backward()
def __lowercase( self : Optional[int] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : Tuple = True
for model_class in self.all_model_classes:
if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ )
model.gradient_checkpointing_enable()
model.to(a_ )
model.train()
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(**a_ ).loss
loss.backward()
def __lowercase( self : Optional[Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[str] = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(a_ ),
*get_values(a_ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ):
SCREAMING_SNAKE_CASE__ : int = problem_type['title']
SCREAMING_SNAKE_CASE__ : Tuple = problem_type['num_labels']
SCREAMING_SNAKE_CASE__ : str = model_class(a_ )
model.to(a_ )
model.train()
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
SCREAMING_SNAKE_CASE__ : Any = inputs['labels'].to(problem_type['dtype'] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=a_ ) as warning_list:
SCREAMING_SNAKE_CASE__ : str = model(**a_ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def __lowercase( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = DeiTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def __lowercase( self : int )-> Dict:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def __lowercase( self : Any )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to(
a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(images=a_ , return_tensors='pt' ).to(a_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(**a_ )
# verify the logits
SCREAMING_SNAKE_CASE__ : int = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __lowercase( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = DeiTModel.from_pretrained(
'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto' )
SCREAMING_SNAKE_CASE__ : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(images=a_ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : str = inputs.pixel_values.to(a_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ )
| 636 | 0 |
import baseaa
def _a ( lowercase__ : str ):
'''simple docstring'''
return baseaa.aaaencode(string.encode('utf-8' ) )
def _a ( lowercase__ : bytes ):
'''simple docstring'''
return baseaa.aaadecode(lowercase__ ).decode('utf-8' )
if __name__ == "__main__":
import doctest
doctest.testmod()
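# The mangled names above look like digit-stripped forms of the standard library's
# base64.a85encode / base64.a85decode (Ascii85). A minimal round-trip check under that
# assumption:
import base64
_encoded = base64.a85encode("Hello, Ascii85!".encode("utf-8"))
assert base64.a85decode(_encoded).decode("utf-8") == "Hello, Ascii85!"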
| 707 | import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
def __init__( self : List[Any] , a_ : Dict , a_ : Any=13 , a_ : Any=7 , a_ : Tuple=True , a_ : Tuple=True , a_ : Optional[int]=False , a_ : Dict=True , a_ : Optional[Any]=99 , a_ : Any=32 , a_ : Dict=5 , a_ : Tuple=4 , a_ : List[str]=37 , a_ : Union[str, Any]="gelu" , a_ : Dict=0.1 , a_ : Tuple=0.1 , a_ : List[str]=512 , a_ : List[str]=16 , a_ : List[str]=2 , a_ : Optional[int]=0.02 , a_ : List[str]=3 , a_ : Union[str, Any]=4 , a_ : Optional[Any]=None , )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = parent
SCREAMING_SNAKE_CASE__ : Dict = batch_size
SCREAMING_SNAKE_CASE__ : Dict = seq_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_input_mask
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE__ : int = use_labels
SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Dict = intermediate_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Tuple = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = num_labels
SCREAMING_SNAKE_CASE__ : Dict = num_choices
SCREAMING_SNAKE_CASE__ : str = scope
def __lowercase( self : Tuple )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Tuple = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : str = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase( self : Dict )-> Tuple:
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a_ , initializer_range=self.initializer_range , )
def __lowercase( self : Any , a_ : str , a_ : Tuple , a_ : Dict , a_ : Optional[int] , a_ : List[Any] , a_ : Union[str, Any] , a_ : Tuple )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BioGptModel(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , attention_mask=a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase( self : List[Any] , a_ : Union[str, Any] , a_ : Optional[int] , a_ : Tuple , a_ : Optional[Any] , a_ : int , a_ : Optional[int] , a_ : int , a_ : str , a_ : Optional[Any] , )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = BioGptForCausalLM(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase( self : Tuple , a_ : Optional[int] , a_ : Union[str, Any] , a_ : Any , a_ : Any , a_ : Optional[int] , *a_ : Tuple )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = BioGptModel(config=a_ )
model.to(a_ )
model.eval()
# create attention mask
SCREAMING_SNAKE_CASE__ : Any = torch.ones(input_ids.shape , dtype=torch.long , device=a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.seq_length // 2
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
# first forward pass
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , attention_mask=a_ ).to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
SCREAMING_SNAKE_CASE__ : str = ids_tensor((1,) , a_ ).item() + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = random_other_next_tokens
# append to next input_ids and attn_mask
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Dict = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=a_ )] , dim=1 , )
# get two different outputs
SCREAMING_SNAKE_CASE__ : str = model(a_ , attention_mask=a_ )['last_hidden_state']
SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , past_key_values=a_ , attention_mask=a_ )['last_hidden_state']
# select random slice
SCREAMING_SNAKE_CASE__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : List[str] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1e-3 ) )
def __lowercase( self : str , a_ : List[Any] , a_ : str , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Optional[Any] , *a_ : List[str] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = BioGptModel(config=a_ ).to(a_ ).eval()
SCREAMING_SNAKE_CASE__ : Dict = torch.ones(input_ids.shape , dtype=torch.long , device=a_ )
# first forward pass
SCREAMING_SNAKE_CASE__ : Any = model(a_ , attention_mask=a_ , use_cache=a_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE__ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
SCREAMING_SNAKE_CASE__ : int = model(a_ , attention_mask=a_ )['last_hidden_state']
SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , attention_mask=a_ , past_key_values=a_ )[
'last_hidden_state'
]
# select random slice
SCREAMING_SNAKE_CASE__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1e-3 ) )
def __lowercase( self : Any , a_ : List[str] , a_ : Optional[int] , a_ : Any , a_ : Tuple , a_ : Any , *a_ : List[Any] , a_ : Union[str, Any]=False )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = BioGptForCausalLM(a_ )
model.to(a_ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
SCREAMING_SNAKE_CASE__ : Tuple = model(a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def __lowercase( self : Union[str, Any] , a_ : List[str] , *a_ : Optional[int] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = BioGptModel(a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def __lowercase( self : Dict , a_ : Tuple , a_ : Tuple , a_ : List[str] , a_ : Any , a_ : str , *a_ : str )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.num_labels
SCREAMING_SNAKE_CASE__ : str = BioGptForTokenClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ , attention_mask=a_ , token_type_ids=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowercase( self : Any )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = config_and_inputs  # config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowercase_ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
lowercase_ = (BioGptForCausalLM,) if is_torch_available() else ()
lowercase_ = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ = False
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = BioGptModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 )
def __lowercase( self : Tuple )-> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowercase( self : Optional[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def __lowercase( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE__ : List[str] = type
self.model_tester.create_and_check_model(*a_ )
def __lowercase( self : int )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*a_ )
def __lowercase( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*a_ , gradient_checkpointing=a_ )
def __lowercase( self : Union[str, Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*a_ )
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*a_ )
def __lowercase( self : str )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*a_ )
@slow
def __lowercase( self : List[str] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(a_ )
SCREAMING_SNAKE_CASE__ : Dict = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
SCREAMING_SNAKE_CASE__ : List[str] = 'left'
# Define PAD Token = EOS Token
SCREAMING_SNAKE_CASE__ : Any = tokenizer.eos_token
SCREAMING_SNAKE_CASE__ : Tuple = model.config.eos_token_id
# use different length sentences to test batching
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
'Hello, my dog is a little',
'Today, I',
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(a_ , return_tensors='pt' , padding=a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = inputs['input_ids'].to(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = model.generate(
input_ids=a_ , attention_mask=inputs['attention_mask'].to(a_ ) , )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(a_ )
SCREAMING_SNAKE_CASE__ : Dict = model.generate(input_ids=a_ )
SCREAMING_SNAKE_CASE__ : Tuple = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
SCREAMING_SNAKE_CASE__ : Dict = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.generate(input_ids=a_ , max_length=model.config.max_length - num_paddings )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.decode(output_non_padded[0] , skip_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = [
'Hello, my dog is a little bit bigger than a little bit.',
'Today, I have a good idea of how to use the information',
]
self.assertListEqual(a_ , a_ )
self.assertListEqual(a_ , [non_padded_sentence, padded_sentence] )
@slow
def __lowercase( self : Any )-> List[Any]:
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : List[Any] = BioGptModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def __lowercase( self : Optional[int] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[Any] = 3
SCREAMING_SNAKE_CASE__ : List[Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_ids.ne(1 ).to(a_ )
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : int = BioGptForSequenceClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : str = 3
SCREAMING_SNAKE_CASE__ : Any = 'multi_label_classification'
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE__ : Any = input_ids.ne(1 ).to(a_ )
SCREAMING_SNAKE_CASE__ : str = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE__ : Dict = BioGptForSequenceClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = model(a_ , attention_mask=a_ , labels=a_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def __lowercase( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([[2, 4805, 9, 656, 21]] )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ )[0]
SCREAMING_SNAKE_CASE__ : List[str] = 42_384
SCREAMING_SNAKE_CASE__ : Dict = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , a_ )
SCREAMING_SNAKE_CASE__ : int = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=1e-4 ) )
@slow
def __lowercase( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
SCREAMING_SNAKE_CASE__ : Dict = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(a_ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer('COVID-19 is' , return_tensors='pt' ).to(a_ )
SCREAMING_SNAKE_CASE__ : int = model.generate(
**a_ , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=a_ , )
SCREAMING_SNAKE_CASE__ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
self.assertEqual(a_ , a_ )
| 636 | 0 |
import os
from pathlib import Path
def _a ( lowercase__ : int , lowercase__ : str , lowercase__ : str , lowercase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
SCREAMING_SNAKE_CASE__ : Tuple = f'''{src_lang}-{tgt_lang}'''
SCREAMING_SNAKE_CASE__ : List[Any] = f'''\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'''
model_card_dir.mkdir(parents=__A , exist_ok=__A )
SCREAMING_SNAKE_CASE__ : str = os.path.join(__A , 'README.md' )
print(f'''Generating {path}''' )
with open(__A , 'w' , encoding='utf-8' ) as f:
f.write(__A )
# make sure we are under the root of the project
SCREAMING_SNAKE_CASE__ : Dict = Path(__file__).resolve().parent.parent.parent
SCREAMING_SNAKE_CASE__ : Optional[Any] = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
SCREAMING_SNAKE_CASE__ : str = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 708 | import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ : Optional[Any] = random.Random()
def _a ( lowercase__ : List[str] , lowercase__ : List[Any]=1.0 , lowercase__ : Optional[int]=None , lowercase__ : List[str]=None ):
'''simple docstring'''
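# build a nested list of random floats with the requested (batch, length) shape, scaled by `scale`; the module-level RNG is used when no generator is passed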
if rng is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = global_rng
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class snake_case ( unittest.TestCase ):
def __init__( self : List[Any] , a_ : Optional[Any] , a_ : Union[str, Any]=7 , a_ : Any=400 , a_ : List[Any]=2000 , a_ : Tuple=1 , a_ : Optional[int]=0.0 , a_ : Optional[Any]=1_6000 , a_ : str=True , a_ : Union[str, Any]=80 , a_ : Dict=16 , a_ : Tuple=64 , a_ : Any="hann_window" , a_ : Union[str, Any]=80 , a_ : List[Any]=7600 , a_ : Optional[Any]=1e-1_0 , a_ : Dict=True , )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = parent
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size
SCREAMING_SNAKE_CASE__ : str = min_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = max_seq_length
SCREAMING_SNAKE_CASE__ : str = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE__ : int = feature_size
SCREAMING_SNAKE_CASE__ : str = padding_value
SCREAMING_SNAKE_CASE__ : Any = sampling_rate
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : int = num_mel_bins
SCREAMING_SNAKE_CASE__ : int = hop_length
SCREAMING_SNAKE_CASE__ : str = win_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = win_function
SCREAMING_SNAKE_CASE__ : List[str] = fmin
SCREAMING_SNAKE_CASE__ : Dict = fmax
SCREAMING_SNAKE_CASE__ : int = mel_floor
SCREAMING_SNAKE_CASE__ : Tuple = return_attention_mask
def __lowercase( self : Dict )-> Dict:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def __lowercase( self : List[Any] , a_ : str=False , a_ : List[Any]=False )-> Optional[Any]:
"""simple docstring"""
def _flatten(a_ : int ):
return list(itertools.chain(*a_ ) )
if equal_length:
SCREAMING_SNAKE_CASE__ : Tuple = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE__ : Optional[int] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE__ : int = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
def __lowercase( self : Any , a_ : int=False , a_ : Any=False )-> Union[str, Any]:
"""simple docstring"""
if equal_length:
SCREAMING_SNAKE_CASE__ : str = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE__ : Tuple = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE__ : List[str] = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = SpeechTaFeatureExtractor
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = SpeechTaFeatureExtractionTester(self )
def __lowercase( self : Any , a_ : Optional[int] )-> List[str]:
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_ , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_ , axis=0 ) - 1 ) < 1e-3 ) )
def __lowercase( self : Tuple )-> Dict:
"""simple docstring"""
# Tests that all calls wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE__ : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : Optional[int] = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE__ : List[Any] = feat_extract(a_ , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : List[str] = feat_extract(a_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : int = ['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE__ : Tuple = [None, 1600, None]
for max_length, padding in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE__ : str = feat_extract(a_ , padding=a_ , max_length=a_ , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __lowercase( self : List[Any] )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : List[Any] = range(800 , 1400 , 200 )
SCREAMING_SNAKE_CASE__ : int = [floats_list((1, x) )[0] for x in lengths]
SCREAMING_SNAKE_CASE__ : int = ['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [None, 1600, None]
for max_length, padding in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE__ : List[str] = feat_extract(a_ , max_length=a_ , padding=a_ )
SCREAMING_SNAKE_CASE__ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __lowercase( self : int )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract(
a_ , truncation=a_ , max_length=1000 , padding='max_length' , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __lowercase( self : Optional[Any] )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : List[str] = feat_extract(
a_ , truncation=a_ , max_length=1000 , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
SCREAMING_SNAKE_CASE__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : str = feat_extract(
a_ , truncation=a_ , max_length=2000 , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def __lowercase( self : Any )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.random.rand(100 ).astype(np.floataa )
SCREAMING_SNAKE_CASE__ : int = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE__ : Any = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
SCREAMING_SNAKE_CASE__ : Tuple = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __lowercase( self : Any )-> Optional[int]:
"""simple docstring"""
# Tests that all calls wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : Dict = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
SCREAMING_SNAKE_CASE__ : Optional[int] = feature_extractor(audio_target=a_ , padding=a_ , return_tensors='np' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : int = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE__ : Optional[Any] = feature_extractor(a_ , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : Optional[Any] = feature_extractor(a_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE__ : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE__ : List[str] = np.asarray(a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = feature_extractor(a_ , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : str = feature_extractor(a_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
def __lowercase( self : Dict )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Any = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(a_ ) == len(a_ ) for x, y in zip(a_ , processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE__ : str = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a_ )
SCREAMING_SNAKE_CASE__ : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
SCREAMING_SNAKE_CASE__ : List[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE__ : int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a_ )
SCREAMING_SNAKE_CASE__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Any = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE__ : Optional[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __lowercase( self : Tuple )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : Dict = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ : str = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : List[Any] = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.pad(a_ , padding='longest' , return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE__ : Any = feat_extract.pad(a_ , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def __lowercase( self : Any )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.feat_extract_dict
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feature_extraction_class(**a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ : Any = [len(a_ ) for x in speech_inputs]
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE__ : Any = feat_extract.pad(a_ , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , a_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , a_ )
def __lowercase( self : str )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.feat_extract_dict
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feature_extraction_class(**a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ : Tuple = [len(a_ ) for x in speech_inputs]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Dict = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ : str = min(a_ )
SCREAMING_SNAKE_CASE__ : Any = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE__ : int = feat_extract.pad(
a_ , padding='max_length' , max_length=a_ , truncation=a_ , return_tensors='np' )
self.assertIn('attention_mask' , a_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def __lowercase( self : Optional[int] , a_ : List[str] )-> Any:
"""simple docstring"""
from datasets import load_dataset
SCREAMING_SNAKE_CASE__ : int = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE__ : List[Any] = ds.sort('id' ).select(range(a_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def __lowercase( self : List[str] )-> List[Any]:
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor(
[2.3_8_0_4e-0_3, 2.0_7_5_2e-0_3, 1.9_8_3_6e-0_3, 2.1_0_5_7e-0_3, 1.6_1_7_4e-0_3,
3.0_5_1_8e-0_4, 9.1_5_5_3e-0_5, 3.3_5_6_9e-0_4, 9.7_6_5_6e-0_4, 1.8_3_1_1e-0_3,
2.0_1_4_2e-0_3, 2.1_0_5_7e-0_3, 1.7_3_9_5e-0_3, 4.5_7_7_6e-0_4, -3.9_6_7_3e-0_4,
4.5_7_7_6e-0_4, 1.0_0_7_1e-0_3, 9.1_5_5_3e-0_5, 4.8_8_2_8e-0_4, 1.1_5_9_7e-0_3,
7.3_2_4_2e-0_4, 9.4_6_0_4e-0_4, 1.8_0_0_5e-0_3, 1.8_3_1_1e-0_3, 8.8_5_0_1e-0_4,
4.2_7_2_5e-0_4, 4.8_8_2_8e-0_4, 7.3_2_4_2e-0_4, 1.0_9_8_6e-0_3, 2.1_0_5_7e-0_3] )
# fmt: on
SCREAMING_SNAKE_CASE__ : List[str] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : List[str] = feature_extractor(a_ , return_tensors='pt' ).input_values
self.assertEqual(input_values.shape , (1, 93_680) )
self.assertTrue(torch.allclose(input_values[0, :30] , a_ , atol=1e-6 ) )
def __lowercase( self : Tuple )-> List[Any]:
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE__ : int = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : str = feature_extractor(audio_target=a_ , return_tensors='pt' ).input_values
self.assertEqual(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , a_ , atol=1e-4 ) )
| 636 | 0 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class snake_case ( UpperCamelCase_ ):
lowercase_ = ['vqvae']
def __init__( self : Tuple , a_ : List[Any] , a_ : Dict , a_ : Union[str, Any] , a_ : int , )-> Optional[int]:
"""simple docstring"""
super().__init__()
self.register_modules(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , mel=_lowerCAmelCase , vqvae=_lowerCAmelCase )
def __lowercase( self : Any )-> Dict:
"""simple docstring"""
return 50 if isinstance(self.scheduler , _lowerCAmelCase ) else 1000
@torch.no_grad()
def __call__( self : Dict , a_ : int = 1 , a_ : Optional[Any] = None , a_ : Any = None , a_ : Union[str, Any] = 0 , a_ : Tuple = 0 , a_ : Optional[int] = None , a_ : List[Any] = None , a_ : List[str] = 0 , a_ : Tuple = 0 , a_ : Tuple = None , a_ : Dict = 0 , a_ : List[Any] = None , a_ : str = None , a_ : List[Any]=True , )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = steps or self.get_default_steps()
self.scheduler.set_timesteps(_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
SCREAMING_SNAKE_CASE__ : Optional[int] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=_lowerCAmelCase , device=self.device , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = noise
SCREAMING_SNAKE_CASE__ : int = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = self.mel.audio_slice_to_image(_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
SCREAMING_SNAKE_CASE__ : List[Any] = (input_image / 255) * 2 - 1
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.vqvae.encode(torch.unsqueeze(_lowerCAmelCase , 0 ) ).latent_dist.sample(
generator=_lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
SCREAMING_SNAKE_CASE__ : Dict = self.scheduler.add_noise(_lowerCAmelCase , _lowerCAmelCase , self.scheduler.timesteps[start_step - 1] )
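# convert the requested mask windows from seconds to spectrogram columns; the pre-noised copies of the input act as the mask source during denoising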
SCREAMING_SNAKE_CASE__ : List[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
SCREAMING_SNAKE_CASE__ : str = int(mask_start_secs * pixels_per_second )
SCREAMING_SNAKE_CASE__ : int = int(mask_end_secs * pixels_per_second )
SCREAMING_SNAKE_CASE__ : Any = self.scheduler.add_noise(_lowerCAmelCase , _lowerCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : Dict = self.unet(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )['sample']
else:
SCREAMING_SNAKE_CASE__ : Dict = self.unet(_lowerCAmelCase , _lowerCAmelCase )['sample']
if isinstance(self.scheduler , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : Any = self.scheduler.step(
model_output=_lowerCAmelCase , timestep=_lowerCAmelCase , sample=_lowerCAmelCase , eta=_lowerCAmelCase , generator=_lowerCAmelCase , )['prev_sample']
else:
SCREAMING_SNAKE_CASE__ : str = self.scheduler.step(
model_output=_lowerCAmelCase , timestep=_lowerCAmelCase , sample=_lowerCAmelCase , generator=_lowerCAmelCase , )['prev_sample']
if mask is not None:
if mask_start > 0:
SCREAMING_SNAKE_CASE__ : Optional[int] = mask[:, step, :, :mask_start]
if mask_end > 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was the scaling factor used in training to ensure unit variance
SCREAMING_SNAKE_CASE__ : Tuple = 1 / self.vqvae.config.scaling_factor * images
SCREAMING_SNAKE_CASE__ : int = self.vqvae.decode(_lowerCAmelCase )['sample']
SCREAMING_SNAKE_CASE__ : Tuple = (images / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
SCREAMING_SNAKE_CASE__ : Optional[Any] = (images * 255).round().astype('uint8' )
SCREAMING_SNAKE_CASE__ : str = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_lowerCAmelCase , mode='RGB' ).convert('L' ) for _ in images) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [self.mel.image_to_audio(_lowerCAmelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_lowerCAmelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(_lowerCAmelCase ) )
@torch.no_grad()
def __lowercase( self : Union[str, Any] , a_ : Dict , a_ : List[Any] = 50 )-> List[str]:
"""simple docstring"""
assert isinstance(self.scheduler , _lowerCAmelCase )
self.scheduler.set_timesteps(_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
SCREAMING_SNAKE_CASE__ : List[Any] = (sample / 255) * 2 - 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Tensor(_lowerCAmelCase ).to(self.device )
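# reverse DDIM: walk the flipped timestep schedule to recover the latent noise that would generate these images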
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
SCREAMING_SNAKE_CASE__ : List[str] = self.scheduler.alphas_cumprod[t]
SCREAMING_SNAKE_CASE__ : List[Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = 1 - alpha_prod_t
SCREAMING_SNAKE_CASE__ : List[str] = self.unet(_lowerCAmelCase , _lowerCAmelCase )['sample']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
SCREAMING_SNAKE_CASE__ : str = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
SCREAMING_SNAKE_CASE__ : List[Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __lowercase( a_ : int , a_ : Tuple , a_ : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = acos(torch.dot(torch.flatten(_lowerCAmelCase ) , torch.flatten(_lowerCAmelCase ) ) / torch.norm(_lowerCAmelCase ) / torch.norm(_lowerCAmelCase ) )
return sin((1 - alpha) * theta ) * xa / sin(_lowerCAmelCase ) + sin(alpha * theta ) * xa / sin(_lowerCAmelCase )
| 709 | import math
import sys
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = ''
try:
with open(lowercase__ , 'rb' ) as binary_file:
SCREAMING_SNAKE_CASE__ : Tuple = binary_file.read()
for dat in data:
SCREAMING_SNAKE_CASE__ : Tuple = f'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = {'0': '0', '1': '1'}
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = '', ''
SCREAMING_SNAKE_CASE__ : Tuple = len(lowercase__ )
for i in range(len(lowercase__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
SCREAMING_SNAKE_CASE__ : int = lexicon[curr_string]
result += last_match_id
SCREAMING_SNAKE_CASE__ : str = last_match_id + '0'
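# when the lexicon size reaches a power of two, the code width grows by one bit, so existing entries are re-keyed before the next entry is added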
if math.loga(lowercase__ ).is_integer():
SCREAMING_SNAKE_CASE__ : List[str] = {}
for curr_key in list(lowercase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = lexicon.pop(lowercase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = new_lex
SCREAMING_SNAKE_CASE__ : Any = last_match_id + '1'
index += 1
SCREAMING_SNAKE_CASE__ : Tuple = ''
return result
def _a ( lowercase__ : str , lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = 8
try:
with open(lowercase__ , 'wb' ) as opened_file:
SCREAMING_SNAKE_CASE__ : Dict = [
to_write[i : i + byte_length]
for i in range(0 , len(lowercase__ ) , lowercase__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(lowercase__ , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def _a ( lowercase__ : str ):
'''simple docstring'''
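# the compressed stream starts with a size prefix: a run of marker zeros followed by a length field one bit longer; both are stripped here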
SCREAMING_SNAKE_CASE__ : Dict = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
SCREAMING_SNAKE_CASE__ : Optional[int] = data_bits[counter:]
SCREAMING_SNAKE_CASE__ : int = data_bits[counter + 1 :]
return data_bits
def _a ( lowercase__ : str , lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = read_file_binary(lowercase__ )
SCREAMING_SNAKE_CASE__ : Dict = remove_prefix(lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = decompress_data(lowercase__ )
write_file_binary(lowercase__ , lowercase__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 636 | 0 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case ( UpperCamelCase_ ):
def __init__( self : int , a_ : List[Any] , a_ : Tuple=13 , a_ : List[str]=7 , a_ : Dict=True , a_ : str=True , a_ : List[str]=True , a_ : Optional[Any]=True , a_ : List[str]=True , a_ : Tuple=False , a_ : Optional[int]=False , a_ : Union[str, Any]=False , a_ : List[Any]=2 , a_ : Optional[int]=99 , a_ : Optional[int]=0 , a_ : str=32 , a_ : Optional[Any]=5 , a_ : Union[str, Any]=4 , a_ : Any=0.1 , a_ : Tuple=0.1 , a_ : Union[str, Any]=512 , a_ : str=12 , a_ : Union[str, Any]=2 , a_ : Optional[int]=0.02 , a_ : int=3 , a_ : Optional[Any]=4 , a_ : int="last" , a_ : List[Any]=None , a_ : Optional[Any]=None , )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = parent
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : Any = use_input_lengths
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE__ : List[str] = use_labels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = gelu_activation
SCREAMING_SNAKE_CASE__ : Tuple = sinusoidal_embeddings
SCREAMING_SNAKE_CASE__ : str = causal
SCREAMING_SNAKE_CASE__ : Union[str, Any] = asm
SCREAMING_SNAKE_CASE__ : Optional[int] = n_langs
SCREAMING_SNAKE_CASE__ : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] = n_special
SCREAMING_SNAKE_CASE__ : List[str] = hidden_size
SCREAMING_SNAKE_CASE__ : str = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Dict = num_attention_heads
SCREAMING_SNAKE_CASE__ : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = type_vocab_size
SCREAMING_SNAKE_CASE__ : Dict = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Tuple = initializer_range
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_choices
SCREAMING_SNAKE_CASE__ : Union[str, Any] = summary_type
SCREAMING_SNAKE_CASE__ : Dict = use_proj
SCREAMING_SNAKE_CASE__ : Dict = scope
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : str = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_input_lengths:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size] , 2 ).float()
SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def __lowercase( self : Any , a_ : Any , a_ : Dict , a_ : Dict , a_ : str , a_ : str , a_ : Any , a_ : Tuple , a_ : List[Any] , a_ : Optional[Any] , )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = FlaubertModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ : int = model(UpperCamelCase_ , lengths=UpperCamelCase_ , langs=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ : int = model(UpperCamelCase_ , langs=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase( self : Dict , a_ : Any , a_ : Any , a_ : str , a_ : Optional[int] , a_ : Any , a_ : int , a_ : Optional[Any] , a_ : int , a_ : List[Any] , )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = FlaubertWithLMHeadModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase( self : Any , a_ : Union[str, Any] , a_ : Any , a_ : str , a_ : Optional[int] , a_ : Optional[Any] , a_ : List[str] , a_ : Any , a_ : Dict , a_ : Union[str, Any] , )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = FlaubertForQuestionAnsweringSimple(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ : int = model(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ : Dict = model(UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowercase( self : Optional[Any] , a_ : Dict , a_ : Any , a_ : List[Any] , a_ : Union[str, Any] , a_ : Dict , a_ : Any , a_ : Optional[Any] , a_ : Optional[int] , a_ : Optional[int] , )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = FlaubertForQuestionAnswering(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = model(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ : str = model(
UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , cls_index=UpperCamelCase_ , is_impossible=UpperCamelCase_ , p_mask=UpperCamelCase_ , )
SCREAMING_SNAKE_CASE__ : Dict = model(
UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , cls_index=UpperCamelCase_ , is_impossible=UpperCamelCase_ , )
(SCREAMING_SNAKE_CASE__ ) : str = result_with_labels.to_tuple()
SCREAMING_SNAKE_CASE__ : List[Any] = model(UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ )
(SCREAMING_SNAKE_CASE__ ) : Tuple = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __lowercase( self : Optional[Any] , a_ : List[str] , a_ : Tuple , a_ : List[Any] , a_ : Optional[Any] , a_ : Any , a_ : Any , a_ : int , a_ : Union[str, Any] , a_ : Any , )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = FlaubertForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Any = model(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ : int = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowercase( self : Any , a_ : Any , a_ : Optional[Any] , a_ : int , a_ : List[Any] , a_ : Tuple , a_ : Optional[int] , a_ : int , a_ : Any , a_ : str , )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.num_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] = FlaubertForTokenClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ : str = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowercase( self : Optional[Any] , a_ : List[str] , a_ : Dict , a_ : Dict , a_ : Dict , a_ : str , a_ : Tuple , a_ : Tuple , a_ : List[str] , a_ : Any , )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.num_choices
SCREAMING_SNAKE_CASE__ : Dict = FlaubertForMultipleChoice(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : str = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowercase( self : Any )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs()
(
SCREAMING_SNAKE_CASE__
) : Dict = config_and_inputs
SCREAMING_SNAKE_CASE__ : str = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowercase_ = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase_ = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def __lowercase( self : Union[str, Any] , a_ : int , a_ : Any , a_ : List[Any] , a_ : Any , a_ : Any )-> Any:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __lowercase( self : Any , a_ : Tuple , a_ : List[Any] , a_ : int=False )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = super()._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
SCREAMING_SNAKE_CASE__ : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ )
return inputs_dict
def __lowercase( self : Dict )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = FlaubertModelTester(self )
SCREAMING_SNAKE_CASE__ : int = ConfigTester(self , config_class=UpperCamelCase_ , emb_dim=37 )
def __lowercase( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowercase( self : Union[str, Any] )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*UpperCamelCase_ )
def __lowercase( self : List[str] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*UpperCamelCase_ )
def __lowercase( self : str )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*UpperCamelCase_ )
def __lowercase( self : Dict )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*UpperCamelCase_ )
def __lowercase( self : str )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*UpperCamelCase_ )
def __lowercase( self : Tuple )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*UpperCamelCase_ )
def __lowercase( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*UpperCamelCase_ )
@slow
def __lowercase( self : List[str] )-> List[str]:
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : List[str] = FlaubertModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@slow
@require_torch_gpu
def __lowercase( self : Tuple )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
SCREAMING_SNAKE_CASE__ : Optional[int] = True
SCREAMING_SNAKE_CASE__ : Tuple = model_class(config=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ : Dict = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ : Any = torch.jit.trace(
UpperCamelCase_ , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(UpperCamelCase_ , os.path.join(UpperCamelCase_ , 'traced_model.pt' ) )
SCREAMING_SNAKE_CASE__ : Dict = torch.jit.load(os.path.join(UpperCamelCase_ , 'traced_model.pt' ) , map_location=UpperCamelCase_ )
loaded(inputs_dict['input_ids'].to(UpperCamelCase_ ) , inputs_dict['attention_mask'].to(UpperCamelCase_ ) )
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def __lowercase( self : str )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased' )
SCREAMING_SNAKE_CASE__ : int = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any = model(UpperCamelCase_ )[0]
SCREAMING_SNAKE_CASE__ : List[Any] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ : int = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1e-4 ) )
| 710 |
def _a ( lowercase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : List[Any] = set({'(', '[', '{'} )
SCREAMING_SNAKE_CASE__ : Optional[int] = set({')', ']', '}'} )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'{': '}', '[': ']', '(': ')'}
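# Stack-based matching: push every opening bracket and, on a closing bracket, require that it
# pairs with the most recently opened one; any mismatch or leftover bracket means the string is unbalanced.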
for i in range(len(lowercase__ ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(lowercase__ ) == 0 or (len(lowercase__ ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(lowercase__ ) == 0
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = input('Enter sequence of brackets: ' )
if is_balanced(lowercase__ ):
print(lowercase__ , 'is balanced' )
else:
print(lowercase__ , 'is not balanced' )
if __name__ == "__main__":
main()
| 636 | 0 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
SCREAMING_SNAKE_CASE__ : str = threading.Lock()
SCREAMING_SNAKE_CASE__ : Optional[logging.Handler] = None
SCREAMING_SNAKE_CASE__ : List[Any] = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
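# Maps values of the TRANSFORMERS_VERBOSITY environment variable to standard logging levels.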
SCREAMING_SNAKE_CASE__ : str = logging.WARNING
SCREAMING_SNAKE_CASE__ : Dict = True
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = os.getenv('TRANSFORMERS_VERBOSITY' , _lowerCAmelCase )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '''
f'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
return _default_log_level
def _a ( ):
'''simple docstring'''
return __name__.split('.' )[0]
def _a ( ):
'''simple docstring'''
return logging.getLogger(_get_library_name() )
def _a ( ):
'''simple docstring'''
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.StreamHandler() # Set sys.stderr as stream.
SCREAMING_SNAKE_CASE__ : Optional[Any] = sys.stderr.flush
# Apply our default configuration to the library root logger.
SCREAMING_SNAKE_CASE__ : List[str] = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
SCREAMING_SNAKE_CASE__ : Tuple = False
def _a ( ):
'''simple docstring'''
global _default_handler
with _lock:
if not _default_handler:
return
SCREAMING_SNAKE_CASE__ : str = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
SCREAMING_SNAKE_CASE__ : int = None
def _a ( ):
'''simple docstring'''
return log_levels
def _a ( lowercase__ : Optional[str] = None ):
'''simple docstring'''
if name is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(_lowerCAmelCase )
def _a ( ):
'''simple docstring'''
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def _a ( lowercase__ : int ):
'''simple docstring'''
_configure_library_root_logger()
_get_library_root_logger().setLevel(_lowerCAmelCase )
def _a ( ):
'''simple docstring'''
return set_verbosity(_lowerCAmelCase )
def _a ( ):
'''simple docstring'''
return set_verbosity(_lowerCAmelCase )
def _a ( ):
'''simple docstring'''
return set_verbosity(_lowerCAmelCase )
def _a ( ):
'''simple docstring'''
return set_verbosity(_lowerCAmelCase )
def _a ( ):
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def _a ( ):
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def _a ( lowercase__ : logging.Handler ):
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(_lowerCAmelCase )
def _a ( lowercase__ : logging.Handler ):
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(_lowerCAmelCase )
def _a ( ):
'''simple docstring'''
_configure_library_root_logger()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
def _a ( ):
'''simple docstring'''
_configure_library_root_logger()
SCREAMING_SNAKE_CASE__ : List[Any] = True
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_root_logger().handlers
for handler in handlers:
SCREAMING_SNAKE_CASE__ : Dict = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
handler.setFormatter(_lowerCAmelCase )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(_lowerCAmelCase )
def _a ( self : List[str] , *lowercase__ : int , **lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , _lowerCAmelCase )
if no_advisory_warnings:
return
self.warning(*_lowerCAmelCase , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = warning_advice
@functools.lru_cache(_lowerCAmelCase )
def _a ( self : Dict , *lowercase__ : Any , **lowercase__ : str ):
'''simple docstring'''
self.warning(*_lowerCAmelCase , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : int = warning_once
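# The lru_cache on the call arguments means a given warning message is emitted at most once per process.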
class snake_case :
def __init__( self : Optional[Any] , *a_ : List[str] , **a_ : Union[str, Any] )-> List[Any]: # pylint: disable=unused-argument
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = args[0] if args else None
def __iter__( self : str )-> Optional[int]:
"""simple docstring"""
return iter(self._iterator )
def __getattr__( self : List[Any] , a_ : Union[str, Any] )-> Any:
"""simple docstring"""
def empty_fn(*a_ : Optional[Any] , **a_ : Tuple ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Any )-> List[Any]:
"""simple docstring"""
return self
def __exit__( self : Any , a_ : List[Any] , a_ : Optional[int] , a_ : int )-> List[str]:
"""simple docstring"""
return
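# EmptyTqdm mirrors the tqdm interface (iteration, context manager, arbitrary attribute access) as no-ops,
# so callers need no special casing when progress bars are disabled.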
class snake_case :
def __call__( self : Optional[Any] , *a_ : Optional[Any] , **a_ : Union[str, Any] )-> Dict:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm(*__A , **__A )
else:
return EmptyTqdm(*__A , **__A )
def __lowercase( self : str , *a_ : Tuple , **a_ : Optional[int] )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*__A , **__A )
def __lowercase( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
SCREAMING_SNAKE_CASE__ : str = _tqdm_cls()
def _a ( ):
'''simple docstring'''
global _tqdm_active
return bool(_tqdm_active )
def _a ( ):
'''simple docstring'''
global _tqdm_active
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
hf_hub_utils.enable_progress_bars()
def _a ( ):
'''simple docstring'''
global _tqdm_active
SCREAMING_SNAKE_CASE__ : List[Any] = False
hf_hub_utils.disable_progress_bars()
| 711 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ : List[Any] = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = PegasusTokenizer
lowercase_ = PegasusTokenizerFast
lowercase_ = True
lowercase_ = True
def __lowercase( self : int )-> List[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ : List[Any] = PegasusTokenizer(a_ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowercase( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def __lowercase( self : Any , **a_ : Optional[Any] )-> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **a_ )
def __lowercase( self : Union[str, Any] , a_ : List[Any] )-> Optional[int]:
"""simple docstring"""
return ("This is a test", "This is a test")
def __lowercase( self : Optional[int] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = '</s>'
SCREAMING_SNAKE_CASE__ : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def __lowercase( self : Dict )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
self.assertEqual(len(a_ ) , 1103 )
def __lowercase( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def __lowercase( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Tuple = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
SCREAMING_SNAKE_CASE__ : List[str] = rust_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = py_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0]
self.assertListEqual(a_ , a_ )
def __lowercase( self : Any )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
SCREAMING_SNAKE_CASE__ : Any = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
SCREAMING_SNAKE_CASE__ : List[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer([raw_input_str] , return_tensors=a_ ).input_ids[0]
self.assertListEqual(a_ , a_ )
def __lowercase( self : int )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
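# With offset=103, regular SentencePiece piece ids are shifted up by 103, which is why the underlying
# <unk> piece (id 2) maps to token id 105 in the asserts above.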
SCREAMING_SNAKE_CASE__ : int = 'To ensure a smooth flow of bank resolutions.'
SCREAMING_SNAKE_CASE__ : List[Any] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer([raw_input_str] , return_tensors=a_ ).input_ids[0]
self.assertListEqual(a_ , a_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def __lowercase( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = ['This is going to be way too long.' * 150, 'short example']
SCREAMING_SNAKE_CASE__ : int = ['not super long but more than 5 tokens', 'tiny']
SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer(a_ , padding=a_ , truncation=a_ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : Optional[int] = self._large_tokenizer(
text_target=a_ , max_length=5 , padding=a_ , truncation=a_ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(a_ ) == 2 # input_ids, attention_mask.
@slow
def __lowercase( self : Any )-> str:
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE__ : Optional[int] = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = PegasusTokenizer
lowercase_ = PegasusTokenizerFast
lowercase_ = True
lowercase_ = True
def __lowercase( self : Any )-> Union[str, Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ : Optional[int] = PegasusTokenizer(a_ , offset=0 , mask_token_sent=a_ , mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowercase( self : Optional[Any] )-> List[str]:
"""simple docstring"""
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def __lowercase( self : List[str] , **a_ : Optional[Any] )-> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **a_ )
def __lowercase( self : Optional[Any] , a_ : Tuple )-> str:
"""simple docstring"""
return ("This is a test", "This is a test")
def __lowercase( self : str )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Tuple = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
SCREAMING_SNAKE_CASE__ : str = rust_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0]
SCREAMING_SNAKE_CASE__ : str = py_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0]
self.assertListEqual(a_ , a_ )
@require_torch
def __lowercase( self : List[str] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = ['This is going to be way too long.' * 1000, 'short example']
SCREAMING_SNAKE_CASE__ : Optional[int] = ['not super long but more than 5 tokens', 'tiny']
SCREAMING_SNAKE_CASE__ : str = self._large_tokenizer(a_ , padding=a_ , truncation=a_ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer(
text_target=a_ , max_length=5 , padding=a_ , truncation=a_ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(a_ ) == 2 # input_ids, attention_mask.
def __lowercase( self : Dict )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._large_tokenizer(a_ ).input_ids
self.assertListEqual(
a_ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 636 | 0 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case :
def __init__( self : int , a_ : Dict , a_ : str=13 , a_ : Tuple=30 , a_ : List[str]=2 , a_ : List[Any]=3 , a_ : Optional[int]=True , a_ : Any=True , a_ : List[str]=32 , a_ : str=2 , a_ : Optional[Any]=4 , a_ : Optional[Any]=37 , a_ : List[Any]="gelu" , a_ : Tuple=0.1 , a_ : List[str]=0.1 , a_ : Optional[int]=10 , a_ : Tuple=0.02 , a_ : Optional[Any]=3 , a_ : List[str]=None , )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = parent
SCREAMING_SNAKE_CASE__ : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE__ : Tuple = image_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE__ : str = num_channels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = is_training
SCREAMING_SNAKE_CASE__ : int = use_labels
SCREAMING_SNAKE_CASE__ : str = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : int = intermediate_size
SCREAMING_SNAKE_CASE__ : Dict = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : int = initializer_range
SCREAMING_SNAKE_CASE__ : Tuple = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE__ : Dict = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ : Dict = num_patches + 1
def __lowercase( self : Any )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : str = self.get_config()
return config, pixel_values, labels
def __lowercase( self : List[str] )-> Dict:
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , )
def __lowercase( self : Dict , a_ : Tuple , a_ : int , a_ : Tuple )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = TFViTModel(config=_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_a , training=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
SCREAMING_SNAKE_CASE__ : Tuple = self.image_size // 2
SCREAMING_SNAKE_CASE__ : List[Any] = pixel_values[:, :, :image_size, :image_size]
SCREAMING_SNAKE_CASE__ : Tuple = model(_a , interpolate_pos_encoding=_a , training=_a )
SCREAMING_SNAKE_CASE__ : List[Any] = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
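# interpolate_pos_encoding=True resizes the position embeddings, so the half-size image simply yields a shorter sequence instead of a shape error.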
def __lowercase( self : Tuple , a_ : Union[str, Any] , a_ : int , a_ : List[str] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE__ : List[str] = TFViTForImageClassification(_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a , labels=_a , training=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_size // 2
SCREAMING_SNAKE_CASE__ : Optional[Any] = pixel_values[:, :, :image_size, :image_size]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a , interpolate_pos_encoding=_a , training=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ : Optional[Any] = 1
SCREAMING_SNAKE_CASE__ : Tuple = TFViTForImageClassification(_a )
SCREAMING_SNAKE_CASE__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : Any = config_and_inputs
SCREAMING_SNAKE_CASE__ : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowercase_ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
lowercase_ = (
{"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification}
if is_tf_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
def __lowercase( self : Dict )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = TFViTModelTester(self )
SCREAMING_SNAKE_CASE__ : Tuple = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def __lowercase( self : str )-> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def __lowercase( self : Optional[Any] )-> Dict:
"""simple docstring"""
pass
@unittest.skip(reason='ViT does not use inputs_embeds' )
def __lowercase( self : List[Any] )-> List[str]:
"""simple docstring"""
pass
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
SCREAMING_SNAKE_CASE__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , tf.keras.layers.Layer ) )
def __lowercase( self : Any )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Dict = model_class(_a )
SCREAMING_SNAKE_CASE__ : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : Tuple = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def __lowercase( self : List[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase( self : str )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase( self : Optional[Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(_a )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def __lowercase( self : str )-> str:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def __lowercase( self : Optional[int] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : int = prepare_img()
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(images=_a , return_tensors='tf' )
# forward pass
SCREAMING_SNAKE_CASE__ : Tuple = model(**_a )
# verify the logits
SCREAMING_SNAKE_CASE__ : Dict = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
SCREAMING_SNAKE_CASE__ : Dict = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , _a , atol=1e-4 )
| 712 |
def _a ( lowercase__ : int = 1_00_00_00 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , lowercase__ ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
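# Added note: for limit = 10 the totients phi(2..10) are 1, 2, 2, 4, 2, 6, 4, 6, 4, so the intended sieve
# returns 31, the count of reduced proper fractions with denominator at most 10 (Project Euler 72 uses the default limit).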
if __name__ == "__main__":
print(solution())
| 636 | 0 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class snake_case ( unittest.TestCase ):
lowercase_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def __lowercase( self : List[Any] , a_ : List[str] , a_ : str , a_ : int )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = hf_hub_download(
repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
SCREAMING_SNAKE_CASE__ : Any = VideoClassificationPipeline(model=a_ , image_processor=a_ , top_k=2 )
SCREAMING_SNAKE_CASE__ : List[str] = [
example_video_filepath,
'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
]
return video_classifier, examples
def __lowercase( self : str , a_ : Optional[Any] , a_ : Optional[int] )-> List[Any]:
"""simple docstring"""
for example in examples:
SCREAMING_SNAKE_CASE__ : Any = video_classifier(a_ )
self.assertEqual(
a_ , [
{'score': ANY(a_ ), 'label': ANY(a_ )},
{'score': ANY(a_ ), 'label': ANY(a_ )},
] , )
@require_torch
def __lowercase( self : Optional[int] )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
SCREAMING_SNAKE_CASE__ : Union[str, Any] = VideoMAEFeatureExtractor(
size={'shortest_edge': 10} , crop_size={'height': 10, 'width': 10} )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipeline(
'video-classification' , model=a_ , feature_extractor=a_ , frame_sampling_rate=4 )
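# frame_sampling_rate=4 makes the pipeline sample every fourth frame, so the tiny test model only has to process a short clip.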
SCREAMING_SNAKE_CASE__ : Optional[Any] = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
SCREAMING_SNAKE_CASE__ : Optional[int] = video_classifier(a_ , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}] , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
[{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
[{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
] , )
@require_tf
def __lowercase( self : List[Any] )-> Dict:
"""simple docstring"""
pass
| 713 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
def _a ( lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any]=False , lowercase__ : str=False , lowercase__ : Dict=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def _a ( lowercase__ : List[str] , lowercase__ : Dict ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
SCREAMING_SNAKE_CASE__ : Dict = 'vilt.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.weight''' )
SCREAMING_SNAKE_CASE__ : Any = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE__ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE__ : Tuple = in_proj_bias[-config.hidden_size :]
def _a ( lowercase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
def _a ( lowercase__ : int , lowercase__ : int , lowercase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = dct.pop(lowercase__ )
SCREAMING_SNAKE_CASE__ : Any = val
@torch.no_grad()
def _a ( lowercase__ : Dict , lowercase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = ViltConfig(image_size=3_84 , patch_size=32 , tie_word_embeddings=lowercase__ )
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : str = False
if "vqa" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Any = True
SCREAMING_SNAKE_CASE__ : str = 31_29
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'huggingface/label-files'
SCREAMING_SNAKE_CASE__ : int = 'vqa2-id2label.json'
SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {int(lowercase__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Dict = idalabel
SCREAMING_SNAKE_CASE__ : str = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : List[str] = ViltForQuestionAnswering(lowercase__ )
elif "nlvr" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Optional[int] = True
SCREAMING_SNAKE_CASE__ : List[str] = 2
SCREAMING_SNAKE_CASE__ : Dict = {0: 'False', 1: 'True'}
SCREAMING_SNAKE_CASE__ : Dict = {v: k for k, v in config.idalabel.items()}
SCREAMING_SNAKE_CASE__ : Tuple = 3
SCREAMING_SNAKE_CASE__ : int = ViltForImagesAndTextClassification(lowercase__ )
elif "irtr" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : str = ViltForImageAndTextRetrieval(lowercase__ )
elif "mlm_itm" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : int = True
SCREAMING_SNAKE_CASE__ : Optional[int] = ViltForMaskedLM(lowercase__ )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE__ : Any = torch.hub.load_state_dict_from_url(lowercase__ , map_location='cpu' )['state_dict']
SCREAMING_SNAKE_CASE__ : Any = create_rename_keys(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
read_in_q_k_v(lowercase__ , lowercase__ )
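# At this point the timm-style fused qkv projection has been split into separate query, key and value weights.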
if mlm_model or irtr_model:
SCREAMING_SNAKE_CASE__ : Any = ['itm_score.fc.weight', 'itm_score.fc.bias']
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = model.load_state_dict(lowercase__ , strict=lowercase__ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(lowercase__ )
# Define processor
SCREAMING_SNAKE_CASE__ : str = ViltImageProcessor(size=3_84 )
SCREAMING_SNAKE_CASE__ : List[Any] = BertTokenizer.from_pretrained('bert-base-uncased' )
SCREAMING_SNAKE_CASE__ : List[Any] = ViltProcessor(lowercase__ , lowercase__ )
# Forward pass on example inputs (image + text)
if nlvr_model:
SCREAMING_SNAKE_CASE__ : List[str] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=lowercase__ ).raw )
SCREAMING_SNAKE_CASE__ : Any = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=lowercase__ ).raw )
SCREAMING_SNAKE_CASE__ : Tuple = (
'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
' standing.'
)
SCREAMING_SNAKE_CASE__ : List[Any] = processor(lowercase__ , lowercase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : List[str] = processor(lowercase__ , lowercase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : List[Any] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
SCREAMING_SNAKE_CASE__ : Tuple = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=lowercase__ ).raw )
if mlm_model:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'a bunch of [MASK] laying on a [MASK].'
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'How many cats are there?'
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor(lowercase__ , lowercase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : str = model(**lowercase__ )
# Verify outputs
if mlm_model:
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Size([1, 11, 3_05_22] )
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , lowercase__ , atol=1E-4 )
# verify masked token prediction equals "cats"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
SCREAMING_SNAKE_CASE__ : str = torch.Size([1, 31_29] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([-15.9495, -18.1472, -10.3041] )
assert torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 )
assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.Size([1, 2] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([-2.8721, 2.1291] )
assert torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase__ )
processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 636 | 0 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class snake_case :
lowercase_ = LEDConfig
lowercase_ = {}
lowercase_ = 'gelu'
def __init__( self : Union[str, Any] , a_ : Optional[Any] , a_ : str=13 , a_ : Any=7 , a_ : Union[str, Any]=True , a_ : Dict=False , a_ : Any=99 , a_ : str=32 , a_ : Union[str, Any]=2 , a_ : Optional[Any]=4 , a_ : List[str]=37 , a_ : Any=0.1 , a_ : Dict=0.1 , a_ : Any=20 , a_ : List[str]=2 , a_ : Any=1 , a_ : int=0 , a_ : Optional[int]=4 , )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = parent
SCREAMING_SNAKE_CASE__ : Dict = batch_size
SCREAMING_SNAKE_CASE__ : Dict = seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : str = use_labels
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : int = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Any = num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : str = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = eos_token_id
SCREAMING_SNAKE_CASE__ : Tuple = pad_token_id
SCREAMING_SNAKE_CASE__ : str = bos_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
SCREAMING_SNAKE_CASE__ : Dict = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
SCREAMING_SNAKE_CASE__ : Dict = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def __lowercase( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE__ : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Dict = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
SCREAMING_SNAKE_CASE__ : str = prepare_led_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = tf.concat(
[tf.zeros_like(_UpperCAmelCase )[:, :-1], tf.ones_like(_UpperCAmelCase )[:, -1:]] , axis=-1 , )
SCREAMING_SNAKE_CASE__ : Optional[int] = global_attention_mask
return config, inputs_dict
def __lowercase( self : Optional[int] , a_ : int , a_ : Any )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = TFLEDModel(config=_UpperCAmelCase ).get_decoder()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs_dict['input_ids']
SCREAMING_SNAKE_CASE__ : List[str] = input_ids[:1, :]
SCREAMING_SNAKE_CASE__ : int = inputs_dict['attention_mask'][:1, :]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Any = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention_mask
SCREAMING_SNAKE_CASE__ : str = tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE__ : int = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE__ : str = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE__ : List[Any] = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE__ : List[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_UpperCAmelCase , _UpperCAmelCase , rtol=1e-3 )
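# Matching slices from the cached and uncached passes confirm that past_key_values reproduces the full-sequence computation.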
def _a ( lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : Optional[int]=None , lowercase__ : Tuple=None , lowercase__ : str=None , lowercase__ : Tuple=None , ):
'''simple docstring'''
if attention_mask is None:
SCREAMING_SNAKE_CASE__ : Tuple = tf.cast(tf.math.not_equal(lowerCAmelCase__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE__ : Dict = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE__ : List[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE__ : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowercase_ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
lowercase_ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
lowercase_ = (
{
'conversational': TFLEDForConditionalGeneration,
'feature-extraction': TFLEDModel,
'summarization': TFLEDForConditionalGeneration,
'text2text-generation': TFLEDForConditionalGeneration,
'translation': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowercase_ = True
lowercase_ = False
lowercase_ = False
lowercase_ = False
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = TFLEDModelTester(self )
SCREAMING_SNAKE_CASE__ : Tuple = ConfigTester(self , config_class=_UpperCAmelCase )
def __lowercase( self : Tuple )-> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowercase( self : Union[str, Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase )
def __lowercase( self : Any )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : str = tf.zeros_like(inputs_dict['attention_mask'] )
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
SCREAMING_SNAKE_CASE__ : Dict = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
SCREAMING_SNAKE_CASE__ : Any = self.model_tester.seq_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(a_ : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Tuple = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(a_ : List[str] ):
SCREAMING_SNAKE_CASE__ : int = [t.numpy() for t in outputs.encoder_attentions]
SCREAMING_SNAKE_CASE__ : str = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : int = True
SCREAMING_SNAKE_CASE__ : str = False
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : Any = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : List[str] = len(_UpperCAmelCase )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE__ : Dict = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_decoder_attentions_output(_UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE__ : Tuple = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE__ : int = True
SCREAMING_SNAKE_CASE__ : str = True
SCREAMING_SNAKE_CASE__ : Any = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def __lowercase( self : Dict )-> Union[str, Any]:
"""simple docstring"""
pass
def __lowercase( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
pass
def _a ( lowercase__ : Tuple ):
'''simple docstring'''
return tf.constant(lowerCAmelCase__ , dtype=tf.intaa )
SCREAMING_SNAKE_CASE__ : Dict = 1E-4
@slow
@require_tf
class snake_case ( unittest.TestCase ):
def __lowercase( self : Union[str, Any] )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
SCREAMING_SNAKE_CASE__ : str = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
SCREAMING_SNAKE_CASE__ : int = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
SCREAMING_SNAKE_CASE__ : Any = prepare_led_inputs_dict(model.config , _UpperCAmelCase , _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(**_UpperCAmelCase )[0]
SCREAMING_SNAKE_CASE__ : Dict = (1, 1024, 768)
self.assertEqual(output.shape , _UpperCAmelCase )
# change to expected output here
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-3 )
def __lowercase( self : str )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
SCREAMING_SNAKE_CASE__ : Optional[Any] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
SCREAMING_SNAKE_CASE__ : Dict = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
SCREAMING_SNAKE_CASE__ : Tuple = prepare_led_inputs_dict(model.config , _UpperCAmelCase , _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = model(**_UpperCAmelCase )[0]
SCREAMING_SNAKE_CASE__ : Dict = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , _UpperCAmelCase )
# change to expected output here
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-3 , rtol=1e-3 )
| 714 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class snake_case :
lowercase_ = 42 # [batch_size x 3]
lowercase_ = 42 # [batch_size x 3]
lowercase_ = 42 # [batch_size x 3]
lowercase_ = 42 # [batch_size x 3]
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
def __lowercase( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def __lowercase( self : Dict )-> Tuple:
"""simple docstring"""
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def __lowercase( self : Dict )-> Union[str, Any]:
"""simple docstring"""
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def __lowercase( self : Tuple )-> torch.Tensor:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = torch.arange(self.height * self.width )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.stack(
[
pixel_indices % self.width,
torch.div(a_ , self.width , rounding_mode='trunc' ),
] , axis=1 , )
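# Each row of coords is an (x, y) pixel location recovered from the flat index via modulo and integer division by the image width.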
return coords
@property
def __lowercase( self : Any )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.shape
SCREAMING_SNAKE_CASE__ : Tuple = int(np.prod(a_ ) )
SCREAMING_SNAKE_CASE__ : List[str] = self.get_image_coords()
SCREAMING_SNAKE_CASE__ : Dict = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
SCREAMING_SNAKE_CASE__ : Any = self.get_camera_rays(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = rays.view(a_ , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def __lowercase( self : Optional[Any] , a_ : torch.Tensor )-> torch.Tensor:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
SCREAMING_SNAKE_CASE__ : str = coords.view(a_ , -1 , 2 )
SCREAMING_SNAKE_CASE__ : List[Any] = self.resolution()
SCREAMING_SNAKE_CASE__ : str = self.fov()
SCREAMING_SNAKE_CASE__ : Any = (flat.float() / (res - 1)) * 2 - 1
SCREAMING_SNAKE_CASE__ : Any = fracs * torch.tan(fov / 2 )
SCREAMING_SNAKE_CASE__ : List[str] = fracs.view(a_ , -1 , 2 )
SCREAMING_SNAKE_CASE__ : str = (
self.z.view(a_ , 1 , 3 )
+ self.x.view(a_ , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(a_ , 1 , 3 ) * fracs[:, :, 1:]
)
SCREAMING_SNAKE_CASE__ : Tuple = directions / directions.norm(dim=-1 , keepdim=a_ )
SCREAMING_SNAKE_CASE__ : Any = torch.stack(
[
torch.broadcast_to(self.origin.view(a_ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(a_ , *a_ , 2 , 3 )
def __lowercase( self : Optional[int] , a_ : int , a_ : int )-> "DifferentiableProjectiveCamera":
"""simple docstring"""
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=a_ , height=a_ , x_fov=self.x_fov , y_fov=self.y_fov , )
def _a ( lowercase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : str = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([np.sin(lowercase__ ), np.cos(lowercase__ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
SCREAMING_SNAKE_CASE__ : Tuple = -z * 4
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([np.cos(lowercase__ ), -np.sin(lowercase__ ), 0.0] )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.cross(lowercase__ , lowercase__ )
origins.append(lowercase__ )
xs.append(lowercase__ )
ys.append(lowercase__ )
zs.append(lowercase__ )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(lowercase__ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowercase__ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowercase__ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowercase__ , axis=0 ) ).float() , width=lowercase__ , height=lowercase__ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowercase__ )) , )
| 636 | 0 |
from ..utils import DummyObject, requires_backends
class snake_case ( metaclass=__lowerCAmelCase ):
lowercase_ = ['onnx']
def __init__( self : Optional[Any] , *a_ : Tuple , **a_ : int )-> int:
"""simple docstring"""
requires_backends(self , ['onnx'] )
@classmethod
def __lowercase( cls : Optional[Any] , *a_ : Optional[Any] , **a_ : Optional[int] )-> int:
"""simple docstring"""
requires_backends(cls , ['onnx'] )
@classmethod
def __lowercase( cls : List[Any] , *a_ : List[str] , **a_ : List[Any] )-> List[str]:
"""simple docstring"""
requires_backends(cls , ['onnx'] )
| 715 | import requests
SCREAMING_SNAKE_CASE__ : int = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(f'''{i}.) {article['title']}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 636 | 0 |
from __future__ import annotations
def _a ( lowercase__ : List[Any] ):
'''simple docstring'''
return [ord(_lowerCamelCase ) - 96 for elem in plain]
def _a ( lowercase__ : Tuple ):
'''simple docstring'''
return "".join(chr(elem + 96 ) for elem in encoded )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = encode(input('-> ' ).strip().lower() )
print('Encoded: ' , _lowerCamelCase )
print('Decoded:' , decode(_lowerCamelCase ) )
if __name__ == "__main__":
main()
| 716 | import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger()
@dataclass
class snake_case :
lowercase_ = 42
lowercase_ = field(default_factory=UpperCamelCase_ )
lowercase_ = field(default_factory=UpperCamelCase_ )
def __lowercase( self : Dict , a_ : Dict , a_ : Tensor , a_ : Tensor )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = len(list(m.modules() ) ) == 1 or isinstance(a_ , nn.Convad ) or isinstance(a_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(a_ )
def __call__( self : Tuple , a_ : Tensor )-> Any:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(a_ )
[x.remove() for x in self.handles]
return self
@property
def __lowercase( self : Tuple )-> int:
"""simple docstring"""
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda a_ : len(list(a_.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class snake_case :
lowercase_ = 42
lowercase_ = 42
lowercase_ = 1
lowercase_ = field(default_factory=UpperCamelCase_ )
lowercase_ = field(default_factory=UpperCamelCase_ )
lowercase_ = True
def __call__( self : List[Any] , a_ : Tensor )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = Tracker(self.dest )(a_ ).parametrized
SCREAMING_SNAKE_CASE__ : Optional[int] = Tracker(self.src )(a_ ).parametrized
SCREAMING_SNAKE_CASE__ : List[str] = list(filter(lambda a_ : type(a_ ) not in self.src_skip , a_ ) )
SCREAMING_SNAKE_CASE__ : Dict = list(filter(lambda a_ : type(a_ ) not in self.dest_skip , a_ ) )
if len(a_ ) != len(a_ ) and self.raise_if_mismatch:
raise Exception(
F'''Numbers of operations are different. Source module has {len(a_ )} operations while'''
F''' destination module has {len(a_ )}.''' )
for dest_m, src_m in zip(a_ , a_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'''Transferred from={src_m} to={dest_m}''' )
class snake_case ( nn.Module ):
def __init__( self : List[Any] , a_ : nn.Module )-> Dict:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(('conv1', model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('block' ), F'''Unexpected layer name {k}'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(a_ ) + 1
feature_blocks.append((F'''res{block_index}''', v) )
SCREAMING_SNAKE_CASE__ : Any = nn.ModuleDict(a_ )
def __lowercase( self : Tuple , a_ : Tensor )-> Dict:
"""simple docstring"""
return get_trunk_forward_outputs(
a_ , out_feat_keys=a_ , feature_blocks=self._feature_blocks , )
class snake_case ( UpperCamelCase_ ):
def __lowercase( self : Optional[Any] , a_ : str )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = x.split('-' )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self : Union[str, Any] , a_ : str )-> Callable[[], Tuple[nn.Module, Dict]]:
"""simple docstring"""
# default to timm!
if x not in self:
SCREAMING_SNAKE_CASE__ : Any = self.convert_name_to_timm(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = partial(lambda: (timm.create_model(a_ , pretrained=a_ ).eval(), None) )
else:
SCREAMING_SNAKE_CASE__ : List[str] = super().__getitem__(a_ )
return val
class snake_case ( UpperCamelCase_ ):
def __getitem__( self : Any , a_ : str )-> Callable[[], nn.Module]:
"""simple docstring"""
if "seer" in x and "in1k" not in x:
SCREAMING_SNAKE_CASE__ : Any = RegNetModel
else:
SCREAMING_SNAKE_CASE__ : Any = RegNetForImageClassification
return val
def _a ( lowercase__ : Any , lowercase__ : Optional[Any] , lowercase__ : List[Tuple[str, str]] ):
'''simple docstring'''
for from_key, to_key in keys:
SCREAMING_SNAKE_CASE__ : Tuple = from_state_dict[from_key].clone()
print(f'''Copied key={from_key} to={to_key}''' )
return to_state_dict
def _a ( lowercase__ : str , lowercase__ : Callable[[], nn.Module] , lowercase__ : Callable[[], nn.Module] , lowercase__ : RegNetConfig , lowercase__ : Path , lowercase__ : bool = True , ):
'''simple docstring'''
print(f'''Converting {name}...''' )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = from_model_func()
SCREAMING_SNAKE_CASE__ : int = our_model_func(lowercase__ ).eval()
SCREAMING_SNAKE_CASE__ : List[Any] = ModuleTransfer(src=lowercase__ , dest=lowercase__ , raise_if_mismatch=lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.randn((1, 3, 2_24, 2_24) )
module_transfer(lowercase__ )
if from_state_dict is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
SCREAMING_SNAKE_CASE__ : int = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
SCREAMING_SNAKE_CASE__ : Optional[Any] = manually_copy_vissl_head(lowercase__ , our_model.state_dict() , lowercase__ )
our_model.load_state_dict(lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = our_model(lowercase__ , output_hidden_states=lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = (
our_outputs.logits if isinstance(lowercase__ , lowercase__ ) else our_outputs.last_hidden_state
)
SCREAMING_SNAKE_CASE__ : List[Any] = from_model(lowercase__ )
SCREAMING_SNAKE_CASE__ : List[str] = from_output[-1] if type(lowercase__ ) is list else from_output
# now since I don't want to use any config files, the vissl seer model doesn't actually have a head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
SCREAMING_SNAKE_CASE__ : List[Any] = our_outputs.hidden_states[-1]
assert torch.allclose(lowercase__ , lowercase__ ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add model' , use_temp_dir=lowercase__ , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 2_24 if 'seer' not in name else 3_84
# we can use the convnext one
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' , size=lowercase__ )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add image processor' , use_temp_dir=lowercase__ , )
print(f'''Pushed {name}''' )
def _a ( lowercase__ : Path , lowercase__ : str = None , lowercase__ : bool = True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = 'imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE__ : Tuple = 10_00
SCREAMING_SNAKE_CASE__ : Tuple = (1, num_labels)
SCREAMING_SNAKE_CASE__ : str = 'huggingface/label-files'
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE__ : List[str] = json.load(open(cached_download(hf_hub_url(lowercase__ , lowercase__ , repo_type='dataset' ) ) , 'r' ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {int(lowercase__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : str = idalabel
SCREAMING_SNAKE_CASE__ : Tuple = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Any = partial(lowercase__ , num_labels=lowercase__ , idalabel=lowercase__ , labelaid=lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 , layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 1_60, 3_84] , groups_width=16 , layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 2_40, 5_28] , groups_width=24 , layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 1_28, 2_88, 6_72] , groups_width=16 , layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 1_68, 4_08, 9_12] , groups_width=24 , layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 1_92, 4_32, 10_08] , groups_width=48 , layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 2_40, 5_60, 13_60] , groups_width=40 , layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 3_92, 7_84, 16_24] , groups_width=56 , layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 2_40, 7_20, 19_20] , groups_width=1_20 , layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 , layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[2_56, 5_12, 8_96, 20_48] , groups_width=1_28 , layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[3_36, 6_72, 13_44, 25_20] , groups_width=1_68 , layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 1_04, 2_08, 4_40] , groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 1_12, 2_56, 6_08] , groups_width=16 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 1_28, 3_20, 7_68] , groups_width=16 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 1_20, 3_36, 8_88] , groups_width=24 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 2_16, 5_76, 15_12] , groups_width=24 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[1_28, 1_92, 5_12, 10_88] , groups_width=64 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[1_44, 2_88, 5_76, 12_96] , groups_width=72 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 4_48, 8_96, 20_16] , groups_width=56 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[2_24, 4_48, 12_32, 30_24] , groups_width=1_12 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
}
SCREAMING_SNAKE_CASE__ : List[Any] = NameToOurModelFuncMap()
SCREAMING_SNAKE_CASE__ : Dict = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(lowercase__ : str , lowercase__ : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
SCREAMING_SNAKE_CASE__ : str = torch.hub.load_state_dict_from_url(lowercase__ , model_dir=str(lowercase__ ) , map_location='cpu' )
SCREAMING_SNAKE_CASE__ : Tuple = model_func()
# check if we have a head, if yes add it
SCREAMING_SNAKE_CASE__ : str = files['classy_state_dict']['base_model']['model']
SCREAMING_SNAKE_CASE__ : str = model_state_dict['trunk']
model.load_state_dict(lowercase__ )
return model.eval(), model_state_dict["heads"]
# pretrained
SCREAMING_SNAKE_CASE__ : Any = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE__ : int = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE__ : List[Any] = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
SCREAMING_SNAKE_CASE__ : Optional[int] = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=10_10 , w_0=17_44 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
SCREAMING_SNAKE_CASE__ : List[Any] = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE__ : Optional[int] = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
SCREAMING_SNAKE_CASE__ : Any = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=10_10 , w_0=17_44 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
lowercase__ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , lowercase__ , lowercase__ , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
lowercase__ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , lowercase__ , lowercase__ , lowercase__ , )
return config, expected_shape
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 636 | 0 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case :
def __init__( self : Tuple , a_ : Optional[Any] , a_ : List[str]=13 , a_ : Union[str, Any]=30 , a_ : Optional[int]=2 , a_ : Optional[int]=3 , a_ : Union[str, Any]=True , a_ : List[Any]=True , a_ : List[str]=32 , a_ : Optional[Any]=2 , a_ : str=4 , a_ : List[Any]=37 , a_ : Tuple="gelu" , a_ : Tuple=0.1 , a_ : Optional[Any]=0.1 , a_ : List[str]=10 , a_ : List[Any]=0.02 , a_ : int=3 , a_ : List[str]=0.6 , a_ : Union[str, Any]=None , )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = parent
SCREAMING_SNAKE_CASE__ : str = batch_size
SCREAMING_SNAKE_CASE__ : Any = image_size
SCREAMING_SNAKE_CASE__ : List[str] = patch_size
SCREAMING_SNAKE_CASE__ : List[Any] = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : List[str] = use_labels
SCREAMING_SNAKE_CASE__ : int = hidden_size
SCREAMING_SNAKE_CASE__ : Any = num_hidden_layers
SCREAMING_SNAKE_CASE__ : str = num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : str = hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE__ : Optional[int] = mask_ratio
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE__ : str = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __lowercase( self : int )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowercase( self : List[str] )-> int:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __lowercase( self : Tuple , a_ : Any , a_ : str , a_ : Any )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = TFViTMAEModel(config=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : int = model(__lowerCamelCase , training=__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase( self : Optional[Any] , a_ : List[Any] , a_ : Optional[Any] , a_ : str )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = TFViTMAEForPreTraining(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = model(__lowerCamelCase , training=__lowerCamelCase )
# expected sequence length = num_patches
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (self.image_size // self.patch_size) ** 2
SCREAMING_SNAKE_CASE__ : Dict = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
SCREAMING_SNAKE_CASE__ : List[Any] = 1
SCREAMING_SNAKE_CASE__ : Dict = TFViTMAEForPreTraining(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : str = model(__lowerCamelCase , training=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __lowercase( self : str )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.prepare_config_and_inputs()
(SCREAMING_SNAKE_CASE__) : Optional[Any] = config_and_inputs
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case ( _A , _A , unittest.TestCase ):
lowercase_ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowercase_ = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def __lowercase( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFViTMAEModelTester(self )
SCREAMING_SNAKE_CASE__ : Any = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def __lowercase( self : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def __lowercase( self : int )-> List[Any]:
"""simple docstring"""
pass
def __lowercase( self : Any )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , tf.keras.layers.Layer ) )
def __lowercase( self : str )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : int = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def __lowercase( self : Any )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __lowercase( self : Union[str, Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCamelCase )
def __lowercase( self : int )-> Union[str, Any]:
"""simple docstring"""
# make the mask reproducible
np.random.seed(2 )
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Any = int((config.image_size // config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE__ : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Any = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = model(__lowerCamelCase , noise=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = copy.deepcopy(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ : List[str] = model(**__lowerCamelCase , noise=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = outputs_dict[0].numpy()
SCREAMING_SNAKE_CASE__ : Dict = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def __lowercase( self : Any )-> Dict:
"""simple docstring"""
# make the mask reproducible
np.random.seed(2 )
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : int = int((config.image_size // config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(a_ : List[str] ):
SCREAMING_SNAKE_CASE__ : Any = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : int = v.numpy()
else:
SCREAMING_SNAKE_CASE__ : Dict = np.array(__lowerCamelCase )
return inputs_np_dict
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Any = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = prepare_numpy_arrays(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(__lowerCamelCase , noise=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = model(**__lowerCamelCase , noise=__lowerCamelCase )
self.assert_outputs_same(__lowerCamelCase , __lowerCamelCase )
def __lowercase( self : Dict , a_ : List[Any] , a_ : List[str] , a_ : Optional[Any] )-> Optional[int]:
"""simple docstring"""
# make masks reproducible
np.random.seed(2 )
SCREAMING_SNAKE_CASE__ : Tuple = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
SCREAMING_SNAKE_CASE__ : Dict = tf.constant(__lowerCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf_noise
super().check_pt_tf_models(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def __lowercase( self : Dict )-> List[str]:
"""simple docstring"""
# make mask reproducible
np.random.seed(2 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Any = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(__lowerCamelCase )
if module_member_name.endswith('MainLayer' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
for module_member in (getattr(__lowerCamelCase , __lowerCamelCase ),)
if isinstance(__lowerCamelCase , __lowerCamelCase )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__lowerCamelCase , '_keras_serializable' , __lowerCamelCase )
}
SCREAMING_SNAKE_CASE__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
SCREAMING_SNAKE_CASE__ : List[Any] = tf.convert_to_tensor(__lowerCamelCase )
inputs_dict.update({'noise': noise} )
for main_layer_class in tf_main_layer_classes:
SCREAMING_SNAKE_CASE__ : Dict = main_layer_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : int = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
SCREAMING_SNAKE_CASE__ : Tuple = tf.keras.Model(__lowerCamelCase , outputs=main_layer(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : str = os.path.join(__lowerCamelCase , 'keras_model.h5' )
model.save(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = tf.keras.models.load_model(
__lowerCamelCase , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(__lowerCamelCase , tf.keras.Model )
SCREAMING_SNAKE_CASE__ : str = model(__lowerCamelCase )
self.assert_outputs_same(__lowerCamelCase , __lowerCamelCase )
@slow
def __lowercase( self : Optional[Any] )-> List[str]:
"""simple docstring"""
# make mask reproducible
np.random.seed(2 )
SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[str] = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = model(__lowerCamelCase , noise=__lowerCamelCase )
if model_class.__name__ == "TFViTMAEModel":
SCREAMING_SNAKE_CASE__ : Tuple = outputs.last_hidden_state.numpy()
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
else:
SCREAMING_SNAKE_CASE__ : Tuple = outputs.logits.numpy()
SCREAMING_SNAKE_CASE__ : str = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCamelCase , saved_model=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = model_class.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = model(__lowerCamelCase , noise=__lowerCamelCase )
if model_class.__name__ == "TFViTMAEModel":
SCREAMING_SNAKE_CASE__ : Dict = after_outputs["last_hidden_state"].numpy()
SCREAMING_SNAKE_CASE__ : List[str] = 0
else:
SCREAMING_SNAKE_CASE__ : str = after_outputs["logits"].numpy()
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowerCamelCase , 1e-5 )
def __lowercase( self : str )-> Union[str, Any]:
"""simple docstring"""
# make mask reproducible
np.random.seed(2 )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : int = int((config.image_size // config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE__ : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = model(__lowerCamelCase , noise=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
SCREAMING_SNAKE_CASE__ : int = model_class.from_config(model.config )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = new_model(__lowerCamelCase ) # Build model
new_model.set_weights(model.get_weights() )
SCREAMING_SNAKE_CASE__ : str = new_model(__lowerCamelCase , noise=__lowerCamelCase )
self.assert_outputs_same(__lowerCamelCase , __lowerCamelCase )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def __lowercase( self : int )-> int:
"""simple docstring"""
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def __lowercase( self : Tuple )-> Any:
"""simple docstring"""
pass
@slow
def __lowercase( self : str )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(__lowerCamelCase )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def __lowercase( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def __lowercase( self : Union[str, Any] )-> str:
"""simple docstring"""
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
SCREAMING_SNAKE_CASE__ : List[Any] = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Tuple = prepare_img()
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(images=__lowerCamelCase , return_tensors='tf' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
SCREAMING_SNAKE_CASE__ : Any = ViTMAEConfig()
SCREAMING_SNAKE_CASE__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**__lowerCamelCase , noise=__lowerCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 )
| 717 | import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class snake_case ( UpperCamelCase_ ):
lowercase_ = ['image_processor', 'tokenizer']
lowercase_ = 'OwlViTImageProcessor'
lowercase_ = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : List[str] , a_ : List[Any]=None , a_ : str=None , **a_ : Any )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , a_ , )
SCREAMING_SNAKE_CASE__ : Tuple = kwargs.pop('feature_extractor' )
SCREAMING_SNAKE_CASE__ : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(a_ , a_ )
def __call__( self : Any , a_ : Optional[int]=None , a_ : Tuple=None , a_ : List[Any]=None , a_ : Tuple="max_length" , a_ : str="np" , **a_ : Any )-> int:
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(a_ , a_ ) or (isinstance(a_ , a_ ) and not isinstance(text[0] , a_ )):
SCREAMING_SNAKE_CASE__ : Tuple = [self.tokenizer(a_ , padding=a_ , return_tensors=a_ , **a_ )]
elif isinstance(a_ , a_ ) and isinstance(text[0] , a_ ):
SCREAMING_SNAKE_CASE__ : Any = []
# Maximum number of queries across batch
SCREAMING_SNAKE_CASE__ : str = max([len(a_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(a_ ) != max_num_queries:
SCREAMING_SNAKE_CASE__ : Tuple = t + [' '] * (max_num_queries - len(a_ ))
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer(a_ , padding=a_ , return_tensors=a_ , **a_ )
encodings.append(a_ )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
SCREAMING_SNAKE_CASE__ : Dict = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ : List[Any] = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ : int = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
SCREAMING_SNAKE_CASE__ : str = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ : Dict = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
SCREAMING_SNAKE_CASE__ : Optional[int] = BatchEncoding()
SCREAMING_SNAKE_CASE__ : List[str] = input_ids
SCREAMING_SNAKE_CASE__ : Tuple = attention_mask
if query_images is not None:
SCREAMING_SNAKE_CASE__ : Any = BatchEncoding()
SCREAMING_SNAKE_CASE__ : Dict = self.image_processor(
a_ , return_tensors=a_ , **a_ ).pixel_values
SCREAMING_SNAKE_CASE__ : Dict = query_pixel_values
if images is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor(a_ , return_tensors=a_ , **a_ )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ : Dict = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def __lowercase( self : str , *a_ : List[str] , **a_ : int )-> List[Any]:
"""simple docstring"""
return self.image_processor.post_process(*a_ , **a_ )
def __lowercase( self : Tuple , *a_ : List[str] , **a_ : str )-> Union[str, Any]:
"""simple docstring"""
return self.image_processor.post_process_object_detection(*a_ , **a_ )
def __lowercase( self : Optional[Any] , *a_ : str , **a_ : Dict )-> Optional[int]:
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*a_ , **a_ )
def __lowercase( self : Optional[int] , *a_ : Tuple , **a_ : Tuple )-> Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*a_ , **a_ )
def __lowercase( self : Tuple , *a_ : Tuple , **a_ : Tuple )-> List[str]:
"""simple docstring"""
return self.tokenizer.decode(*a_ , **a_ )
@property
def __lowercase( self : Tuple )-> Any:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , a_ , )
return self.image_processor_class
@property
def __lowercase( self : List[Any] )-> List[str]:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , a_ , )
return self.image_processor
| 636 | 0 |
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
0: "0",
1: "1",
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
10: "a",
11: "b",
12: "c",
13: "d",
14: "e",
15: "f",
}
def _a ( lowercase__ : float ):
'''simple docstring'''
assert type(a_ ) in (int, float) and decimal == int(a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = ''''''
SCREAMING_SNAKE_CASE__ : str = False
if decimal < 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
decimal *= -1
while decimal > 0:
SCREAMING_SNAKE_CASE__ : List[Any] = divmod(a_ , 16 )
SCREAMING_SNAKE_CASE__ : Tuple = values[remainder] + hexadecimal
SCREAMING_SNAKE_CASE__ : Any = '''0x''' + hexadecimal
if negative:
SCREAMING_SNAKE_CASE__ : Any = '''-''' + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 | class snake_case ( UpperCamelCase_ ):
pass
class snake_case ( UpperCamelCase_ ):
pass
class snake_case :
def __init__( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = [
[],
[],
[],
]
def __lowercase( self : int , a_ : int , a_ : int )-> None:
"""simple docstring"""
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError('Maximum queue size is 100' )
self.queues[priority].append(a_ )
except IndexError:
raise ValueError('Valid priorities are 0, 1, and 2' )
def __lowercase( self : int )-> int:
"""simple docstring"""
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self : Any )-> str:
"""simple docstring"""
return "\n".join(F'''Priority {i}: {q}''' for i, q in enumerate(self.queues ) )
class snake_case :
def __init__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = []
def __lowercase( self : List[str] , a_ : int )-> None:
"""simple docstring"""
if len(self.queue ) == 100:
raise OverFlowError('Maximum queue size is 100' )
self.queue.append(a_ )
def __lowercase( self : int )-> int:
"""simple docstring"""
if not self.queue:
raise UnderFlowError('The queue is empty' )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = min(self.queue )
self.queue.remove(a_ )
return data
def __str__( self : List[str] )-> str:
"""simple docstring"""
return str(self.queue )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
print(lowercase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(lowercase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
print(lowercase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(lowercase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 636 | 0 |
def _a ( lowercase__ : Optional[int] ):
'''simple docstring'''
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
SCREAMING_SNAKE_CASE__ : List[Any] = 1
SCREAMING_SNAKE_CASE__ : Optional[int] = 1
while repunit:
SCREAMING_SNAKE_CASE__ : Any = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def _a ( lowercase__ : List[Any] = 1_00_00_00 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(_A ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 719 | from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _a ( lowercase__ : List[str] ):
'''simple docstring'''
if not is_accelerate_available():
return method
SCREAMING_SNAKE_CASE__ : str = version.parse(accelerate.__version__ ).base_version
if version.parse(lowercase__ ) < version.parse('0.17.0' ):
return method
def wrapper(self : Optional[int] , *lowercase__ : int , **lowercase__ : Tuple ):
if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
self._hf_hook.pre_forward(self )
return method(self , *lowercase__ , **lowercase__ )
return wrapper
| 636 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
@dataclass
class snake_case ( _a ):
lowercase_ = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self : Optional[Any] , **a_ : str )-> str:
"""simple docstring"""
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
SCREAMING_SNAKE_CASE__ : str = deprecated_arg[3:]
setattr(self , snake_case_ , not kwargs.pop(snake_case_ ) )
logger.warning(
F'''{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'''
F''' {positive_arg}={kwargs[positive_arg]}''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = kwargs.pop('torchscript' , self.torchscript )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = kwargs.pop('torch_xla_tpu_print_metrics' , self.torch_xla_tpu_print_metrics )
SCREAMING_SNAKE_CASE__ : List[Any] = kwargs.pop('fp16_opt_level' , self.fpaa_opt_level )
super().__init__(**snake_case_ )
lowercase_ = field(default=_a , metadata={'help': 'Trace the models using torchscript'} )
lowercase_ = field(default=_a , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
lowercase_ = field(
default='O1' , metadata={
'help': (
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
)
} , )
@cached_property
def __lowercase( self : List[Any] )-> Tuple["torch.device", int]:
"""simple docstring"""
requires_backends(self , ['torch'] )
logger.info('PyTorch: setting up devices' )
if not self.cuda:
SCREAMING_SNAKE_CASE__ : str = torch.device('cpu' )
SCREAMING_SNAKE_CASE__ : Dict = 0
elif is_torch_tpu_available():
SCREAMING_SNAKE_CASE__ : Any = xm.xla_device()
SCREAMING_SNAKE_CASE__ : Any = 0
else:
SCREAMING_SNAKE_CASE__ : Dict = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
SCREAMING_SNAKE_CASE__ : Dict = torch.cuda.device_count()
return device, n_gpu
@property
def __lowercase( self : List[str] )-> Tuple:
"""simple docstring"""
return is_torch_tpu_available() and self.tpu
@property
def __lowercase( self : Union[str, Any] )-> int:
"""simple docstring"""
requires_backends(self , ['torch'] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def __lowercase( self : List[str] )-> "torch.device":
"""simple docstring"""
requires_backends(self , ['torch'] )
return self._setup_devices[0]
@property
def __lowercase( self : List[str] )-> int:
"""simple docstring"""
requires_backends(self , ['torch'] )
return self._setup_devices[1]
@property
def __lowercase( self : List[str] )-> Dict:
"""simple docstring"""
return self.n_gpu > 0
| 720 | import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _a ( lowercase__ : int ):
'''simple docstring'''
if is_torch_version('<' , '2.0.0' ) or not hasattr(lowercase__ , '_dynamo' ):
return False
return isinstance(lowercase__ , torch._dynamo.eval_frame.OptimizedModule )
def _a ( lowercase__ : Optional[Any] , lowercase__ : bool = True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
SCREAMING_SNAKE_CASE__ : Dict = is_compiled_module(lowercase__ )
if is_compiled:
SCREAMING_SNAKE_CASE__ : Tuple = model
SCREAMING_SNAKE_CASE__ : int = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE__ : Any = model.module
if not keep_fpaa_wrapper:
SCREAMING_SNAKE_CASE__ : List[Any] = getattr(lowercase__ , 'forward' )
SCREAMING_SNAKE_CASE__ : str = model.__dict__.pop('_original_forward' , lowercase__ )
if original_forward is not None:
while hasattr(lowercase__ , '__wrapped__' ):
SCREAMING_SNAKE_CASE__ : Dict = forward.__wrapped__
if forward == original_forward:
break
SCREAMING_SNAKE_CASE__ : Dict = forward
if getattr(lowercase__ , '_converted_to_transformer_engine' , lowercase__ ):
convert_model(lowercase__ , to_transformer_engine=lowercase__ )
if is_compiled:
SCREAMING_SNAKE_CASE__ : List[Any] = model
SCREAMING_SNAKE_CASE__ : Optional[Any] = compiled_model
return model
def _a ( ):
'''simple docstring'''
PartialState().wait_for_everyone()
def _a ( lowercase__ : str , lowercase__ : Optional[Any] ):
'''simple docstring'''
if PartialState().distributed_type == DistributedType.TPU:
xm.save(lowercase__ , lowercase__ )
elif PartialState().local_process_index == 0:
torch.save(lowercase__ , lowercase__ )
@contextmanager
def _a ( **lowercase__ : str ):
'''simple docstring'''
for key, value in kwargs.items():
SCREAMING_SNAKE_CASE__ : int = str(lowercase__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _a ( lowercase__ : Optional[Any] ):
'''simple docstring'''
if not hasattr(lowercase__ , '__qualname__' ) and not hasattr(lowercase__ , '__name__' ):
SCREAMING_SNAKE_CASE__ : Any = getattr(lowercase__ , '__class__' , lowercase__ )
if hasattr(lowercase__ , '__qualname__' ):
return obj.__qualname__
if hasattr(lowercase__ , '__name__' ):
return obj.__name__
return str(lowercase__ )
def _a ( lowercase__ : List[str] , lowercase__ : List[Any] ):
'''simple docstring'''
for key, value in source.items():
if isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE__ : List[str] = destination.setdefault(lowercase__ , {} )
merge_dicts(lowercase__ , lowercase__ )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = value
return destination
def _a ( lowercase__ : int = None ):
'''simple docstring'''
if port is None:
SCREAMING_SNAKE_CASE__ : int = 2_95_00
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
| 636 | 0 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class snake_case ( tf.keras.layers.Layer ):
def __init__( self : int , a_ : str , a_ : Dict , a_ : Union[str, Any] = None , a_ : List[str] = None )-> Tuple:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pad_token_id
SCREAMING_SNAKE_CASE__ : List[str] = max_length
SCREAMING_SNAKE_CASE__ : Dict = vocab
SCREAMING_SNAKE_CASE__ : Optional[int] = merges
SCREAMING_SNAKE_CASE__ : Dict = BytePairTokenizer(_lowercase , _lowercase , sequence_length=_lowercase )
@classmethod
def __lowercase( cls : Any , a_ : int , *a_ : Optional[Any] , **a_ : Tuple )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = [""" """.join(m ) for m in tokenizer.bpe_ranks.keys()]
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.get_vocab()
return cls(_lowercase , _lowercase , *_lowercase , **_lowercase )
@classmethod
def __lowercase( cls : Dict , a_ : Optional[int] , *a_ : Dict , **a_ : Optional[Any] )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = GPTaTokenizer.from_pretrained(_lowercase , *_lowercase , **_lowercase )
return cls.from_tokenizer(_lowercase , *_lowercase , **_lowercase )
@classmethod
def __lowercase( cls : Dict , a_ : Optional[Any] )-> Dict:
"""simple docstring"""
return cls(**_lowercase )
def __lowercase( self : int )-> Optional[int]:
"""simple docstring"""
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def __lowercase( self : Union[str, Any] , a_ : int , a_ : Union[str, Any] = None )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.tf_tokenizer(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = tf.ones_like(_lowercase )
if self.pad_token_id is not None:
# pad the tokens up to max length
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_length if max_length is not None else self.max_length
if max_length is not None:
SCREAMING_SNAKE_CASE__ : List[str] = pad_model_inputs(
_lowercase , max_seq_length=_lowercase , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 721 | from __future__ import annotations
def _a ( lowercase__ : list[int | float] , lowercase__ : int , lowercase__ : int ):
'''simple docstring'''
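    # Added comment: divide-and-conquer maximum — split [left, right] at the midpoint and
    # return the larger of the two halves' maxima.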
if len(lowercase__ ) == 0:
raise ValueError('find_max() arg is an empty sequence' )
if (
left >= len(lowercase__ )
or left < -len(lowercase__ )
or right >= len(lowercase__ )
or right < -len(lowercase__ )
):
raise IndexError('list index out of range' )
if left == right:
return nums[left]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (left + right) >> 1 # the middle
SCREAMING_SNAKE_CASE__ : int = find_max(lowercase__ , lowercase__ , lowercase__ ) # find max in range[left, mid]
SCREAMING_SNAKE_CASE__ : Tuple = find_max(lowercase__ , mid + 1 , lowercase__ ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 636 | 0 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : str = TypeVar("DatasetType", Dataset, IterableDataset)
def _a ( lowercase__ : List[DatasetType] , lowercase__ : Optional[List[float]] = None , lowercase__ : Optional[int] = None , lowercase__ : Optional[DatasetInfo] = None , lowercase__ : Optional[NamedSplit] = None , lowercase__ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ):
'''simple docstring'''
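    # Added comment: validate that every entry is a Dataset or IterableDataset (all of the same
    # kind), then dispatch to the map-style or iterable interleaving implementation.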
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(snake_case__ ):
if not isinstance(snake_case__ , (Dataset, IterableDataset) ):
if isinstance(snake_case__ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
'is an empty dataset dictionary.' )
raise ValueError(
f'''Dataset at position {i} has at least one split: {list(snake_case__ )}\n'''
f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(snake_case__ ) )}\']''' )
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(snake_case__ ).__name__}.''' )
if i == 0:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = (
(Dataset, IterableDataset) if isinstance(snake_case__ , snake_case__ ) else (IterableDataset, Dataset)
)
elif not isinstance(snake_case__ , snake_case__ ):
raise ValueError(
f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f'''{stopping_strategy} is not supported. Please enter a valid stopping_strategy.''' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
snake_case__ , snake_case__ , snake_case__ , info=snake_case__ , split=snake_case__ , stopping_strategy=snake_case__ )
else:
return _interleave_iterable_datasets(
snake_case__ , snake_case__ , snake_case__ , info=snake_case__ , split=snake_case__ , stopping_strategy=snake_case__ )
def _a ( lowercase__ : List[DatasetType] , lowercase__ : Optional[DatasetInfo] = None , lowercase__ : Optional[NamedSplit] = None , lowercase__ : int = 0 , ):
'''simple docstring'''
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(snake_case__ ):
if not isinstance(snake_case__ , (Dataset, IterableDataset) ):
if isinstance(snake_case__ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
'is an empty dataset dictionary.' )
raise ValueError(
f'''Dataset at position {i} has at least one split: {list(snake_case__ )}\n'''
f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(snake_case__ ) )}\']''' )
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(snake_case__ ).__name__}.''' )
if i == 0:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = (
(Dataset, IterableDataset) if isinstance(snake_case__ , snake_case__ ) else (IterableDataset, Dataset)
)
elif not isinstance(snake_case__ , snake_case__ ):
raise ValueError(
f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(snake_case__ , info=snake_case__ , split=snake_case__ , axis=snake_case__ )
else:
return _concatenate_iterable_datasets(snake_case__ , info=snake_case__ , split=snake_case__ , axis=snake_case__ )
| 700 | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def _a ( lowercase__ : Any ):
'''simple docstring'''
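    # Added comment: process p builds the tensor [p*n + 1, ..., p*n + n] with n = num_processes,
    # so gathering across all ranks yields the values 1..n**2.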
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def _a ( lowercase__ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = create_tensor(lowercase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = gather(lowercase__ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def _a ( lowercase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = [state.process_index]
SCREAMING_SNAKE_CASE__ : Any = gather_object(lowercase__ )
assert len(lowercase__ ) == state.num_processes, f'''{gathered_obj}, {len(lowercase__ )} != {state.num_processes}'''
assert gathered_obj == list(range(state.num_processes ) ), f'''{gathered_obj} != {list(range(state.num_processes ) )}'''
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = create_tensor(lowercase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = broadcast(lowercase__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def _a ( lowercase__ : int ):
'''simple docstring'''
if state.is_main_process:
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.arange(state.num_processes + 1 ).to(state.device )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = torch.arange(state.num_processes ).to(state.device )
SCREAMING_SNAKE_CASE__ : Any = pad_across_processes(lowercase__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def _a ( lowercase__ : Optional[Any] ):
'''simple docstring'''
if state.num_processes != 2:
return
SCREAMING_SNAKE_CASE__ : List[Any] = create_tensor(lowercase__ )
SCREAMING_SNAKE_CASE__ : str = reduce(lowercase__ , 'sum' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), f'''{reduced_tensor} != {truth_tensor}'''
def _a ( lowercase__ : int ):
'''simple docstring'''
if state.num_processes != 2:
return
SCREAMING_SNAKE_CASE__ : Any = create_tensor(lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = reduce(lowercase__ , 'mean' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), f'''{reduced_tensor} != {truth_tensor}'''
def _a ( lowercase__ : int ):
'''simple docstring'''
main()
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = PartialState()
state.print(f'''State: {state}''' )
state.print('testing gather' )
test_gather(lowercase__ )
state.print('testing gather_object' )
test_gather_object(lowercase__ )
state.print('testing broadcast' )
test_broadcast(lowercase__ )
state.print('testing pad_across_processes' )
test_pad_across_processes(lowercase__ )
state.print('testing reduce_sum' )
test_reduce_sum(lowercase__ )
state.print('testing reduce_mean' )
test_reduce_mean(lowercase__ )
if __name__ == "__main__":
main()
| 636 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class snake_case ( lowercase__ , unittest.TestCase ):
lowercase_ = TextToVideoSDPipeline
lowercase_ = TEXT_TO_IMAGE_PARAMS
lowercase_ = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
lowercase_ = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def __lowercase( self : int )-> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
SCREAMING_SNAKE_CASE__ : List[Any] = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=a_ , set_alpha_to_one=a_ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
SCREAMING_SNAKE_CASE__ : Any = CLIPTextModel(a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE__ : Any = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def __lowercase( self : Dict , a_ : Optional[Any] , a_ : Dict=0 )-> Any:
"""simple docstring"""
if str(a_ ).startswith('mps' ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.manual_seed(a_ )
else:
SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device=a_ ).manual_seed(a_ )
SCREAMING_SNAKE_CASE__ : int = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def __lowercase( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Optional[Any] = TextToVideoSDPipeline(**a_ )
SCREAMING_SNAKE_CASE__ : int = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_inputs(a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'np'
SCREAMING_SNAKE_CASE__ : Any = sd_pipe(**a_ ).frames
SCREAMING_SNAKE_CASE__ : Union[str, Any] = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowercase( self : Dict )-> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=a_ , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowercase( self : Optional[int] )-> Tuple:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=a_ , expected_max_diff=1e-2 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __lowercase( self : Tuple )-> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __lowercase( self : Optional[int] )-> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def __lowercase( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
pass
def __lowercase( self : List[str] )-> Optional[Any]:
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class snake_case ( unittest.TestCase ):
def __lowercase( self : Any )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' )
SCREAMING_SNAKE_CASE__ : str = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
SCREAMING_SNAKE_CASE__ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE__ : Any = pipe.to('cuda' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'Spiderman is surfing'
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple = pipe(a_ , generator=a_ , num_inference_steps=25 , output_type='pt' ).frames
SCREAMING_SNAKE_CASE__ : int = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def __lowercase( self : Any )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
SCREAMING_SNAKE_CASE__ : Tuple = pipe.to('cuda' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'Spiderman is surfing'
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device='cpu' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[str] = pipe(a_ , generator=a_ , num_inference_steps=2 , output_type='pt' ).frames
SCREAMING_SNAKE_CASE__ : str = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 701 | import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
SCREAMING_SNAKE_CASE__ : Any = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class snake_case ( unittest.TestCase ):
def __init__( self : List[Any] , a_ : Optional[int] , a_ : Dict=7 , a_ : Any=3 , a_ : Any=18 , a_ : int=30 , a_ : int=400 , a_ : List[Any]=None , a_ : int=True , a_ : int=True , a_ : Dict=None , )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else {'height': 20, 'width': 20}
SCREAMING_SNAKE_CASE__ : str = parent
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : Any = num_channels
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_size
SCREAMING_SNAKE_CASE__ : List[str] = min_resolution
SCREAMING_SNAKE_CASE__ : Dict = max_resolution
SCREAMING_SNAKE_CASE__ : List[Any] = size
SCREAMING_SNAKE_CASE__ : Tuple = do_normalize
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_convert_rgb
SCREAMING_SNAKE_CASE__ : List[str] = [512, 1024, 2048, 4096]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = patch_size if patch_size is not None else {'height': 16, 'width': 16}
def __lowercase( self : Optional[Any] )-> str:
"""simple docstring"""
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __lowercase( self : Dict )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(a_ , stream=a_ ).raw ).convert('RGB' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = PixaStructImageProcessor if is_vision_available() else None
def __lowercase( self : List[str] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = PixaStructImageProcessingTester(self )
@property
def __lowercase( self : Dict )-> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , 'do_normalize' ) )
self.assertTrue(hasattr(a_ , 'do_convert_rgb' ) )
def __lowercase( self : List[Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.image_processor_tester.prepare_dummy_image()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE__ : List[Any] = 2048
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(a_ , return_tensors='pt' , max_patches=a_ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : str = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
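        # Added comment: each flattened patch holds patch_h * patch_w * num_channels pixel values
        # plus 2 extra columns for the row/column position indices.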
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processor(
a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowercase( self : Any )-> Any:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : str = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
SCREAMING_SNAKE_CASE__ : int = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(a_ ):
SCREAMING_SNAKE_CASE__ : Dict = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
SCREAMING_SNAKE_CASE__ : List[Any] = 'Hello'
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ , header_text=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processor(
a_ , return_tensors='pt' , max_patches=a_ , header_text=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowercase( self : List[Any] )-> Dict:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
SCREAMING_SNAKE_CASE__ : str = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE__ : str = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processor(
a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Any = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(
a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = PixaStructImageProcessor if is_vision_available() else None
def __lowercase( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = PixaStructImageProcessingTester(self , num_channels=4 )
SCREAMING_SNAKE_CASE__ : Dict = 3
@property
def __lowercase( self : Any )-> Any:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase( self : Dict )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , 'do_normalize' ) )
self.assertTrue(hasattr(a_ , 'do_convert_rgb' ) )
def __lowercase( self : str )-> Union[str, Any]:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Dict = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processor(
a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 636 | 0 |
from math import factorial
def _a ( lowercase__ : int = 1_00 ):
'''simple docstring'''
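    # Added comment: sum of the decimal digits of n! (Project Euler problem 20 uses n = 100).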
    return sum(map(int , str(factorial(lowercase__ ) ) ) )
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 702 | import heapq as hq
import math
from collections.abc import Iterator
class snake_case :
def __init__( self : str , a_ : str )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = str(id_ )
SCREAMING_SNAKE_CASE__ : Any = None
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {} # {vertex:distance}
def __lt__( self : int , a_ : Tuple )-> Union[str, Any]:
"""simple docstring"""
return self.key < other.key
def __repr__( self : Any )-> Dict:
"""simple docstring"""
return self.id
def __lowercase( self : Optional[Any] , a_ : int )-> List[str]:
"""simple docstring"""
self.neighbors.append(a_ )
def __lowercase( self : int , a_ : int , a_ : Optional[Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = weight
def _a ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : Dict ):
'''simple docstring'''
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , lowercase__ )
graph[b - 1].add_edge(graph[a - 1] , lowercase__ )
def _a ( lowercase__ : list , lowercase__ : Vertex ):
'''simple docstring'''
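    # Added comment: Prim's minimum spanning tree (O(V^2) variant) — repeatedly pick the
    # unvisited vertex with the smallest key by a linear scan, then relax its neighbours.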
SCREAMING_SNAKE_CASE__ : Any = []
for u in graph:
SCREAMING_SNAKE_CASE__ : Dict = math.inf
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : List[str] = 0
SCREAMING_SNAKE_CASE__ : int = graph[:]
while q:
SCREAMING_SNAKE_CASE__ : Optional[Any] = min(lowercase__ )
q.remove(lowercase__ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
SCREAMING_SNAKE_CASE__ : int = u
SCREAMING_SNAKE_CASE__ : Any = u.edges[v.id]
for i in range(1 , len(lowercase__ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def _a ( lowercase__ : list , lowercase__ : Vertex ):
'''simple docstring'''
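    # Added comment: heap-based Prim's algorithm — pop the minimum-key vertex, relax its
    # neighbours, and re-heapify after each round of key updates.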
for u in graph:
SCREAMING_SNAKE_CASE__ : List[str] = math.inf
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
SCREAMING_SNAKE_CASE__ : Tuple = list(lowercase__ )
hq.heapify(lowercase__ )
while h:
SCREAMING_SNAKE_CASE__ : Optional[int] = hq.heappop(lowercase__ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
SCREAMING_SNAKE_CASE__ : List[str] = u
SCREAMING_SNAKE_CASE__ : Dict = u.edges[v.id]
hq.heapify(lowercase__ )
for i in range(1 , len(lowercase__ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def _a ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 636 | 0 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[str] = """Hello world! cécé herlolip"""
def _a ( lowercase__ : str , lowercase__ : str , lowercase__ : bool ):
'''simple docstring'''
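    # Added comment: copy the fairseq RoBERTa weights into the Hugging Face XLM-RoBERTa-XL
    # model layer by layer, then verify that both models produce matching outputs.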
SCREAMING_SNAKE_CASE__ : str = FairseqRobertaModel.from_pretrained(snake_case_ )
roberta.eval() # disable dropout
SCREAMING_SNAKE_CASE__ : Tuple = roberta.model.encoder.sentence_encoder
SCREAMING_SNAKE_CASE__ : str = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print('Our RoBERTa config:' , snake_case_ )
SCREAMING_SNAKE_CASE__ : List[str] = XLMRobertaXLForSequenceClassification(snake_case_ ) if classification_head else XLMRobertaXLForMaskedLM(snake_case_ )
model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE__ : Any = roberta_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE__ : List[str] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE__ : str = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn_layer_norm.bias
# self attention
SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE__ : str = roberta_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE__ : Any = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
SCREAMING_SNAKE_CASE__ : Any = roberta_layer.final_layer_norm.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.final_layer_norm.bias
# intermediate
SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
SCREAMING_SNAKE_CASE__ : Dict = roberta_layer.fca.weight
SCREAMING_SNAKE_CASE__ : Any = roberta_layer.fca.bias
# output
SCREAMING_SNAKE_CASE__ : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
SCREAMING_SNAKE_CASE__ : Dict = roberta_layer.fca.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.fca.bias
# end of layer
if classification_head:
SCREAMING_SNAKE_CASE__ : Any = roberta.model.classification_heads["mnli"].dense.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.classification_heads["mnli"].dense.bias
SCREAMING_SNAKE_CASE__ : Tuple = roberta.model.classification_heads["mnli"].out_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE__ : Tuple = roberta.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE__ : Any = roberta.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE__ : int = roberta.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE__ : Any = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(snake_case_ ).unsqueeze(0 ) # batch of size 1
SCREAMING_SNAKE_CASE__ : Any = model(snake_case_ )[0]
if classification_head:
SCREAMING_SNAKE_CASE__ : Tuple = roberta.model.classification_heads["mnli"](roberta.extract_features(snake_case_ ) )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model(snake_case_ )[0]
print(our_output.shape , their_output.shape )
SCREAMING_SNAKE_CASE__ : Dict = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
pathlib.Path(snake_case_ ).mkdir(parents=snake_case_ , exist_ok=snake_case_ )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 703 | def _a ( lowercase__ : int , lowercase__ : int ):
'''simple docstring'''
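    # Added comment: NAND gate — returns 1 whenever at least one input is 0, and 0 only when
    # both inputs are 1.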
return int((input_a, input_a).count(0 ) != 0 )
def _a ( ):
'''simple docstring'''
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 636 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class snake_case ( lowercase__ ):
lowercase_ = 'decision_transformer'
lowercase_ = ['past_key_values']
lowercase_ = {
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : str , a_ : Optional[int]=17 , a_ : Any=4 , a_ : int=128 , a_ : Tuple=4096 , a_ : List[Any]=True , a_ : int=1 , a_ : Any=1024 , a_ : Optional[Any]=3 , a_ : List[str]=1 , a_ : str=None , a_ : Dict="relu" , a_ : int=0.1 , a_ : Tuple=0.1 , a_ : Optional[int]=0.1 , a_ : Tuple=1e-5 , a_ : str=0.02 , a_ : Optional[int]=True , a_ : Optional[int]=True , a_ : Tuple=5_0256 , a_ : List[Any]=5_0256 , a_ : int=False , a_ : Optional[Any]=False , **a_ : List[Any] , )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dim
SCREAMING_SNAKE_CASE__ : Tuple = act_dim
SCREAMING_SNAKE_CASE__ : Dict = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[int] = max_ep_len
SCREAMING_SNAKE_CASE__ : List[str] = action_tanh
SCREAMING_SNAKE_CASE__ : Any = vocab_size
SCREAMING_SNAKE_CASE__ : str = n_positions
SCREAMING_SNAKE_CASE__ : int = n_layer
SCREAMING_SNAKE_CASE__ : List[str] = n_head
SCREAMING_SNAKE_CASE__ : Dict = n_inner
SCREAMING_SNAKE_CASE__ : Tuple = activation_function
SCREAMING_SNAKE_CASE__ : List[Any] = resid_pdrop
SCREAMING_SNAKE_CASE__ : Tuple = embd_pdrop
SCREAMING_SNAKE_CASE__ : Dict = attn_pdrop
SCREAMING_SNAKE_CASE__ : Optional[int] = layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : Tuple = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = scale_attn_weights
SCREAMING_SNAKE_CASE__ : List[str] = use_cache
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scale_attn_by_inverse_layer_idx
SCREAMING_SNAKE_CASE__ : List[Any] = reorder_and_upcast_attn
SCREAMING_SNAKE_CASE__ : int = bos_token_id
SCREAMING_SNAKE_CASE__ : Any = eos_token_id
super().__init__(bos_token_id=a_ , eos_token_id=a_ , **a_ )
| 704 | from math import factorial, radians
def _a ( lowercase__ : float , lowercase__ : int = 18 , lowercase__ : int = 10 ):
'''simple docstring'''
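    # Added comment: Maclaurin series for sin(x): x - x**3/3! + x**5/5! - ..., evaluated for the
    # requested number of extra terms and rounded to the requested precision.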
SCREAMING_SNAKE_CASE__ : Optional[Any] = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
SCREAMING_SNAKE_CASE__ : int = radians(lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = angle_in_radians
SCREAMING_SNAKE_CASE__ : Optional[int] = 3
SCREAMING_SNAKE_CASE__ : Optional[int] = -1
for _ in range(lowercase__ ):
result += (b * (angle_in_radians**a)) / factorial(lowercase__ )
SCREAMING_SNAKE_CASE__ : Any = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowercase__ , lowercase__ )
if __name__ == "__main__":
__import__("doctest").testmod()
| 636 | 0 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def _a ( lowercase__ : dict ):
'''simple docstring'''
return (data["data"], data["target"])
def _a ( lowercase__ : np.ndarray , lowercase__ : np.ndarray , lowercase__ : np.ndarray ):
'''simple docstring'''
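    # Added comment: fit an XGBoost regressor on the training split and return its test-set
    # predictions reshaped into an (n_samples, 1) column vector.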
__A : int = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(lowercase__ , lowercase__ )
# Predict target for test data
__A : Union[str, Any] = xgb.predict(lowercase__ )
__A : int = predictions.reshape(len(lowercase__ ) , 1 )
return predictions
def _a ( ):
'''simple docstring'''
__A : Union[str, Any] = fetch_california_housing()
__A : List[str] = data_handling(lowercase__ )
__A : List[Any] = train_test_split(
lowercase__ , lowercase__ , test_size=0.25 , random_state=1 )
__A : Any = xgboost(lowercase__ , lowercase__ , lowercase__ )
# Error printing
print(f'''Mean Absolute Error : {mean_absolute_error(lowercase__ , lowercase__ )}''' )
print(f'''Mean Square Error : {mean_squared_error(lowercase__ , lowercase__ )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 705 | import math
def _a ( lowercase__ : int ):
'''simple docstring'''
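    # Added comment: trial-division primality test — handle 2, 3, negatives and even numbers
    # explicitly, then check odd divisors up to sqrt(number).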
assert isinstance(lowercase__ , lowercase__ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
SCREAMING_SNAKE_CASE__ : Tuple = range(3 , int(math.sqrt(lowercase__ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def _a ( lowercase__ : List[str] , lowercase__ : Any=1 , **lowercase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = factor * value
SCREAMING_SNAKE_CASE__ : Dict = value
while not is_prime(lowercase__ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **lowercase__ )
return value
| 636 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case ( __a , __a , unittest.TestCase ):
lowercase_ = StableDiffusionXLImgaImgPipeline
lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
lowercase_ = PipelineTesterMixin.required_optional_params - {'latents'}
lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowercase( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=a_ , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
SCREAMING_SNAKE_CASE__ : List[str] = EulerDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=32 , )
SCREAMING_SNAKE_CASE__ : Dict = CLIPTextModel(a_ )
SCREAMING_SNAKE_CASE__ : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=a_ )
SCREAMING_SNAKE_CASE__ : Tuple = CLIPTextModelWithProjection(a_ )
SCREAMING_SNAKE_CASE__ : int = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def __lowercase( self : Union[str, Any] , a_ : str , a_ : Dict=0 )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = image / 2 + 0.5
if str(a_ ).startswith('mps' ):
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.manual_seed(a_ )
else:
SCREAMING_SNAKE_CASE__ : str = torch.Generator(device=a_ ).manual_seed(a_ )
SCREAMING_SNAKE_CASE__ : str = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def __lowercase( self : int )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : List[Any] = StableDiffusionXLImgaImgPipeline(**a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
SCREAMING_SNAKE_CASE__ : str = self.get_dummy_inputs(a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = sd_pipe(**a_ ).images
SCREAMING_SNAKE_CASE__ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : List[Any] = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowercase( self : List[str] )-> str:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def __lowercase( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __lowercase( self : str )-> Optional[int]:
"""simple docstring"""
pass
def __lowercase( self : Dict )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Tuple = StableDiffusionXLImgaImgPipeline(**a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sd_pipe.to(a_ )
SCREAMING_SNAKE_CASE__ : int = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
# forward without prompt embeds
SCREAMING_SNAKE_CASE__ : str = self.get_dummy_inputs(a_ )
SCREAMING_SNAKE_CASE__ : Any = 3 * ["""this is a negative prompt"""]
SCREAMING_SNAKE_CASE__ : Optional[Any] = negative_prompt
SCREAMING_SNAKE_CASE__ : Optional[Any] = 3 * [inputs["""prompt"""]]
SCREAMING_SNAKE_CASE__ : Any = sd_pipe(**a_ )
SCREAMING_SNAKE_CASE__ : List[str] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs(a_ )
SCREAMING_SNAKE_CASE__ : Any = 3 * ["""this is a negative prompt"""]
SCREAMING_SNAKE_CASE__ : List[str] = 3 * [inputs.pop('prompt' )]
        SCREAMING_SNAKE_CASE__ : str = sd_pipe.encode_prompt(a_ , negative_prompt=a_ )
SCREAMING_SNAKE_CASE__ : Any = sd_pipe(
**a_ , prompt_embeds=a_ , negative_prompt_embeds=a_ , pooled_prompt_embeds=a_ , negative_pooled_prompt_embeds=a_ , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def __lowercase( self : List[str] )-> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase( self : List[str] , a_ : str , a_ : Dict="cpu" , a_ : str=torch.floataa , a_ : str=0 )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = torch.Generator(device=a_ ).manual_seed(a_ )
SCREAMING_SNAKE_CASE__ : Any = np.random.RandomState(a_ ).standard_normal((1, 4, 64, 64) )
SCREAMING_SNAKE_CASE__ : str = torch.from_numpy(a_ ).to(device=a_ , dtype=a_ )
SCREAMING_SNAKE_CASE__ : int = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def __lowercase( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
SCREAMING_SNAKE_CASE__ : int = self.get_inputs(a_ )
SCREAMING_SNAKE_CASE__ : Dict = pipe(**a_ ).images
SCREAMING_SNAKE_CASE__ : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([0.4_9493, 0.4_7896, 0.4_0798, 0.5_4214, 0.5_3212, 0.4_8202, 0.4_7656, 0.4_6329, 0.4_8506] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 706 | import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class snake_case :
def __init__( self : str , a_ : List[str] , a_ : Tuple=13 , a_ : Dict=30 , a_ : Optional[int]=2 , a_ : Tuple=3 , a_ : Dict=True , a_ : int=True , a_ : Optional[Any]=32 , a_ : List[str]=5 , a_ : Any=4 , a_ : Dict=37 , a_ : Dict="gelu" , a_ : int=0.1 , a_ : Optional[Any]=0.1 , a_ : Any=10 , a_ : List[str]=0.02 , a_ : Any=3 , a_ : List[str]=None , a_ : Optional[int]=2 , )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = parent
SCREAMING_SNAKE_CASE__ : int = batch_size
SCREAMING_SNAKE_CASE__ : int = image_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = num_channels
SCREAMING_SNAKE_CASE__ : int = is_training
SCREAMING_SNAKE_CASE__ : List[Any] = use_labels
SCREAMING_SNAKE_CASE__ : str = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : str = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = scope
SCREAMING_SNAKE_CASE__ : str = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
SCREAMING_SNAKE_CASE__ : Optional[int] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_patches + 2
def __lowercase( self : Optional[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __lowercase( self : Optional[Any] )-> Tuple:
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowercase( self : List[str] , a_ : List[str] , a_ : Optional[Any] , a_ : str )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = DeiTModel(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase( self : List[Any] , a_ : List[str] , a_ : List[str] , a_ : List[Any] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = DeiTForMaskedImageModeling(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ : Optional[int] = 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DeiTForMaskedImageModeling(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : int = model(a_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowercase( self : List[str] , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Tuple )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Tuple = DeiTForImageClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ : Any = 1
SCREAMING_SNAKE_CASE__ : int = DeiTForImageClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowercase( self : int )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = config_and_inputs
SCREAMING_SNAKE_CASE__ : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowercase_ = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowercase_ = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
def __lowercase( self : List[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = DeiTModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def __lowercase( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def __lowercase( self : List[Any] )-> Dict:
"""simple docstring"""
pass
def __lowercase( self : str )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , nn.Linear ) )
def __lowercase( self : str )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[str] = model_class(a_ )
SCREAMING_SNAKE_CASE__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : int = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a_ )
def __lowercase( self : List[Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def __lowercase( self : List[Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a_ )
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
def __lowercase( self : str , a_ : str , a_ : Tuple , a_ : Union[str, Any]=False )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __lowercase( self : Optional[Any] )-> Any:
"""simple docstring"""
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(a_ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE__ : Tuple = model_class(a_ )
model.to(a_ )
model.train()
SCREAMING_SNAKE_CASE__ : List[Any] = self._prepare_for_class(a_ , a_ , return_labels=a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**a_ ).loss
loss.backward()
def __lowercase( self : Optional[int] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : Tuple = True
for model_class in self.all_model_classes:
if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ )
model.gradient_checkpointing_enable()
model.to(a_ )
model.train()
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(**a_ ).loss
loss.backward()
def __lowercase( self : Optional[Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[str] = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
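# Background (not part of the original test): in recent transformers releases the
# classification head usually dispatches on config.problem_type, roughly
#   "regression"                  -> nn.MSELoss()
#   "single_label_classification" -> nn.CrossEntropyLoss()
#   "multi_label_classification"  -> nn.BCEWithLogitsLoss()
# so each entry above exercises a different loss path.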
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(a_ ),
*get_values(a_ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ):
SCREAMING_SNAKE_CASE__ : int = problem_type['title']
SCREAMING_SNAKE_CASE__ : Tuple = problem_type['num_labels']
SCREAMING_SNAKE_CASE__ : str = model_class(a_ )
model.to(a_ )
model.train()
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
SCREAMING_SNAKE_CASE__ : Any = inputs['labels'].to(problem_type['dtype'] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=a_ ) as warning_list:
SCREAMING_SNAKE_CASE__ : str = model(**a_ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def __lowercase( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = DeiTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def __lowercase( self : int )-> Dict:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def __lowercase( self : Any )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to(
a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(images=a_ , return_tensors='pt' ).to(a_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(**a_ )
# verify the logits
SCREAMING_SNAKE_CASE__ : int = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __lowercase( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = DeiTModel.from_pretrained(
'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto' )
SCREAMING_SNAKE_CASE__ : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(images=a_ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : str = inputs.pixel_values.to(a_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ )
| 636 | 0 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
SCREAMING_SNAKE_CASE__ : int = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def _a ( lowercase__ : Union[str, Any] , lowercase__ : List[str] , lowercase__ : List[Any] = 1_60_00 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = int(round(sample_rate * max_length ) )
if len(__snake_case ) <= sample_length:
return wav
SCREAMING_SNAKE_CASE__ : Optional[int] = randint(0 , len(__snake_case ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
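# Illustrative example (not part of the original script): with sample_rate=16000
# and max_length=20, a 25 s clip of 400_000 samples is cropped to a random
# 320_000-sample window, while clips of 20 s or less are returned unchanged.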
@dataclass
class snake_case :
lowercase_ = field(default=UpperCamelCase_ , metadata={'help': 'Name of a dataset from the datasets package'} )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'A file containing the training audio paths and labels.'} )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'A file containing the validation audio paths and labels.'} )
lowercase_ = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
lowercase_ = field(
default='validation' , metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
lowercase_ = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
lowercase_ = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''} )
lowercase_ = field(
default=UpperCamelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowercase_ = field(
default=UpperCamelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
lowercase_ = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class snake_case :
lowercase_ = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'} )
lowercase_ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'Name or path of preprocessor config.'} )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'} )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'} )
lowercase_ = field(
default=UpperCamelCase_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
lowercase_ = field(
default=UpperCamelCase_ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def __lowercase( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'The argument `--freeze_feature_extractor` is deprecated and '
'will be removed in a future version. Use `--freeze_feature_encoder` '
'instead. Setting `freeze_feature_encoder==True`.' , FutureWarning , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'The argument `--freeze_feature_extractor` is deprecated and '
'should not be used in combination with `--freeze_feature_encoder`. '
'Only make use of `--freeze_feature_encoder`.' )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_audio_classification' , __snake_case , __snake_case )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : List[str] = training_args.get_process_log_level()
logger.setLevel(__snake_case )
transformers.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE__ : Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE__ : List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to train from scratch.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset and prepare it for the audio classification task.
SCREAMING_SNAKE_CASE__ : List[Any] = DatasetDict()
SCREAMING_SNAKE_CASE__ : str = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE__ : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
'Make sure to set `--audio_column_name` to the correct audio column - one of '
f'''{', '.join(raw_datasets['train'].column_names )}.''' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
'Make sure to set `--label_column_name` to the correct text column - one of '
f'''{', '.join(raw_datasets['train'].column_names )}.''' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
SCREAMING_SNAKE_CASE__ : List[Any] = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
SCREAMING_SNAKE_CASE__ : Optional[Any] = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
SCREAMING_SNAKE_CASE__ : int = feature_extractor.model_input_names[0]
def train_transforms(lowercase__ : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : Dict = []
for audio in batch[data_args.audio_column_name]:
SCREAMING_SNAKE_CASE__ : Any = random_subsample(
audio['array'] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(__snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feature_extractor(__snake_case , sampling_rate=feature_extractor.sampling_rate )
SCREAMING_SNAKE_CASE__ : Dict = {model_input_name: inputs.get(__snake_case )}
SCREAMING_SNAKE_CASE__ : str = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(lowercase__ : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = [audio['array'] for audio in batch[data_args.audio_column_name]]
SCREAMING_SNAKE_CASE__ : List[Any] = feature_extractor(__snake_case , sampling_rate=feature_extractor.sampling_rate )
SCREAMING_SNAKE_CASE__ : int = {model_input_name: inputs.get(__snake_case )}
SCREAMING_SNAKE_CASE__ : str = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
SCREAMING_SNAKE_CASE__ : Optional[int] = raw_datasets['train'].features[data_args.label_column_name].names
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = {}, {}
for i, label in enumerate(__snake_case ):
SCREAMING_SNAKE_CASE__ : str = str(__snake_case )
SCREAMING_SNAKE_CASE__ : Dict = label
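# Illustrative example (not in the original script): for labels ["no", "yes"]
# the two mappings built above become {"no": "0", "yes": "1"} and
# {"0": "no", "1": "yes"}; ids are kept as strings, mirroring how the model
# config stores them.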
# Load the accuracy metric from the datasets package
SCREAMING_SNAKE_CASE__ : Dict = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(lowercase__ : Any ):
SCREAMING_SNAKE_CASE__ : int = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=__snake_case , references=eval_pred.label_ids )
SCREAMING_SNAKE_CASE__ : List[str] = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(__snake_case ) , labelaid=__snake_case , idalabel=__snake_case , finetuning_task='audio-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
SCREAMING_SNAKE_CASE__ : Any = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
raw_datasets['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(__snake_case , output_all_columns=__snake_case )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE__ : List[str] = (
raw_datasets['eval'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(__snake_case , output_all_columns=__snake_case )
# Initialize our trainer
SCREAMING_SNAKE_CASE__ : int = Trainer(
model=__snake_case , args=__snake_case , train_dataset=raw_datasets['train'] if training_args.do_train else None , eval_dataset=raw_datasets['eval'] if training_args.do_eval else None , compute_metrics=__snake_case , tokenizer=__snake_case , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE__ : List[str] = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE__ : Tuple = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE__ : int = last_checkpoint
SCREAMING_SNAKE_CASE__ : int = trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
SCREAMING_SNAKE_CASE__ : Optional[int] = trainer.evaluate()
trainer.log_metrics('eval' , __snake_case )
trainer.save_metrics('eval' , __snake_case )
# Write model card and (optionally) push to hub
SCREAMING_SNAKE_CASE__ : str = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'audio-classification',
'dataset': data_args.dataset_name,
'tags': ['audio-classification'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__snake_case )
else:
trainer.create_model_card(**__snake_case )
if __name__ == "__main__":
main()
| 707 | import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
def __init__( self : List[Any] , a_ : Dict , a_ : Any=13 , a_ : Any=7 , a_ : Tuple=True , a_ : Tuple=True , a_ : Optional[int]=False , a_ : Dict=True , a_ : Optional[Any]=99 , a_ : Any=32 , a_ : Dict=5 , a_ : Tuple=4 , a_ : List[str]=37 , a_ : Union[str, Any]="gelu" , a_ : Dict=0.1 , a_ : Tuple=0.1 , a_ : List[str]=512 , a_ : List[str]=16 , a_ : List[str]=2 , a_ : Optional[int]=0.02 , a_ : List[str]=3 , a_ : Union[str, Any]=4 , a_ : Optional[Any]=None , )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = parent
SCREAMING_SNAKE_CASE__ : Dict = batch_size
SCREAMING_SNAKE_CASE__ : Dict = seq_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_input_mask
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE__ : int = use_labels
SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Dict = intermediate_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Tuple = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = num_labels
SCREAMING_SNAKE_CASE__ : Dict = num_choices
SCREAMING_SNAKE_CASE__ : str = scope
def __lowercase( self : Tuple )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Tuple = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : str = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase( self : Dict )-> Tuple:
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a_ , initializer_range=self.initializer_range , )
def __lowercase( self : Any , a_ : str , a_ : Tuple , a_ : Dict , a_ : Optional[int] , a_ : List[Any] , a_ : Union[str, Any] , a_ : Tuple )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BioGptModel(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , attention_mask=a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase( self : List[Any] , a_ : Union[str, Any] , a_ : Optional[int] , a_ : Tuple , a_ : Optional[Any] , a_ : int , a_ : Optional[int] , a_ : int , a_ : str , a_ : Optional[Any] , )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = BioGptForCausalLM(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase( self : Tuple , a_ : Optional[int] , a_ : Union[str, Any] , a_ : Any , a_ : Any , a_ : Optional[int] , *a_ : Tuple )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = BioGptModel(config=a_ )
model.to(a_ )
model.eval()
# create attention mask
SCREAMING_SNAKE_CASE__ : Any = torch.ones(input_ids.shape , dtype=torch.long , device=a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.seq_length // 2
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
# first forward pass
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , attention_mask=a_ ).to_tuple()
# create a hypothetical next token and extend next_input_ids with it
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
SCREAMING_SNAKE_CASE__ : str = ids_tensor((1,) , a_ ).item() + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = random_other_next_tokens
# append to next input_ids and attn_mask
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Dict = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=a_ )] , dim=1 , )
# get two different outputs
SCREAMING_SNAKE_CASE__ : str = model(a_ , attention_mask=a_ )['last_hidden_state']
SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , past_key_values=a_ , attention_mask=a_ )['last_hidden_state']
# select random slice
SCREAMING_SNAKE_CASE__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : List[str] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1e-3 ) )
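# Note (added for clarity): the allclose check above asserts that incremental
# decoding with past_key_values yields the same hidden state for the new token
# as a full forward pass over the concatenated sequence, within atol=1e-3.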
def __lowercase( self : str , a_ : List[Any] , a_ : str , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Optional[Any] , *a_ : List[str] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = BioGptModel(config=a_ ).to(a_ ).eval()
SCREAMING_SNAKE_CASE__ : Dict = torch.ones(input_ids.shape , dtype=torch.long , device=a_ )
# first forward pass
SCREAMING_SNAKE_CASE__ : Any = model(a_ , attention_mask=a_ , use_cache=a_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = outputs.to_tuple()
# create multiple hypothetical next tokens and extend next_input_ids with them
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and attention mask
SCREAMING_SNAKE_CASE__ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
SCREAMING_SNAKE_CASE__ : int = model(a_ , attention_mask=a_ )['last_hidden_state']
SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , attention_mask=a_ , past_key_values=a_ )[
'last_hidden_state'
]
# select random slice
SCREAMING_SNAKE_CASE__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1e-3 ) )
def __lowercase( self : Any , a_ : List[str] , a_ : Optional[int] , a_ : Any , a_ : Tuple , a_ : Any , *a_ : List[Any] , a_ : Union[str, Any]=False )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = BioGptForCausalLM(a_ )
model.to(a_ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
SCREAMING_SNAKE_CASE__ : Tuple = model(a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def __lowercase( self : Union[str, Any] , a_ : List[str] , *a_ : Optional[int] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = BioGptModel(a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def __lowercase( self : Dict , a_ : Tuple , a_ : Tuple , a_ : List[str] , a_ : Any , a_ : str , *a_ : str )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.num_labels
SCREAMING_SNAKE_CASE__ : str = BioGptForTokenClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ , attention_mask=a_ , token_type_ids=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowercase( self : Any )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowercase_ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
lowercase_ = (BioGptForCausalLM,) if is_torch_available() else ()
lowercase_ = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ = False
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = BioGptModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 )
def __lowercase( self : Tuple )-> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowercase( self : Optional[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def __lowercase( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE__ : List[str] = type
self.model_tester.create_and_check_model(*a_ )
def __lowercase( self : int )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*a_ )
def __lowercase( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*a_ , gradient_checkpointing=a_ )
def __lowercase( self : Union[str, Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*a_ )
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*a_ )
def __lowercase( self : str )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*a_ )
@slow
def __lowercase( self : List[str] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(a_ )
SCREAMING_SNAKE_CASE__ : Dict = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
SCREAMING_SNAKE_CASE__ : List[str] = 'left'
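# Note (general background, not asserted by this test): left padding keeps the
# most recent real token at the end of every row, so the logits used to pick
# the next token during batched generation do not come from a pad position.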
# Define PAD Token = EOS Token
SCREAMING_SNAKE_CASE__ : Any = tokenizer.eos_token
SCREAMING_SNAKE_CASE__ : Tuple = model.config.eos_token_id
# use different length sentences to test batching
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
'Hello, my dog is a little',
'Today, I',
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(a_ , return_tensors='pt' , padding=a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = inputs['input_ids'].to(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = model.generate(
input_ids=a_ , attention_mask=inputs['attention_mask'].to(a_ ) , )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(a_ )
SCREAMING_SNAKE_CASE__ : Dict = model.generate(input_ids=a_ )
SCREAMING_SNAKE_CASE__ : Tuple = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
SCREAMING_SNAKE_CASE__ : Dict = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.generate(input_ids=a_ , max_length=model.config.max_length - num_paddings )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.decode(output_non_padded[0] , skip_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = [
'Hello, my dog is a little bit bigger than a little bit.',
'Today, I have a good idea of how to use the information',
]
self.assertListEqual(a_ , a_ )
self.assertListEqual(a_ , [non_padded_sentence, padded_sentence] )
@slow
def __lowercase( self : Any )-> List[Any]:
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : List[Any] = BioGptModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def __lowercase( self : Optional[int] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[Any] = 3
SCREAMING_SNAKE_CASE__ : List[Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_ids.ne(1 ).to(a_ )
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : int = BioGptForSequenceClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : str = 3
SCREAMING_SNAKE_CASE__ : Any = 'multi_label_classification'
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE__ : Any = input_ids.ne(1 ).to(a_ )
SCREAMING_SNAKE_CASE__ : str = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE__ : Dict = BioGptForSequenceClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = model(a_ , attention_mask=a_ , labels=a_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def __lowercase( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([[2, 4805, 9, 656, 21]] )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ )[0]
SCREAMING_SNAKE_CASE__ : List[str] = 4_2384
SCREAMING_SNAKE_CASE__ : Dict = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , a_ )
SCREAMING_SNAKE_CASE__ : int = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=1e-4 ) )
@slow
def __lowercase( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
SCREAMING_SNAKE_CASE__ : Dict = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(a_ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer('COVID-19 is' , return_tensors='pt' ).to(a_ )
SCREAMING_SNAKE_CASE__ : int = model.generate(
**a_ , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=a_ , )
SCREAMING_SNAKE_CASE__ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
self.assertEqual(a_ , a_ )
| 636 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ : Dict = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Dict = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Dict = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 708 | import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ : Optional[Any] = random.Random()
def _a ( lowercase__ : List[str] , lowercase__ : List[Any]=1.0 , lowercase__ : Optional[int]=None , lowercase__ : List[str]=None ):
'''simple docstring'''
if rng is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = global_rng
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class snake_case ( unittest.TestCase ):
def __init__( self : List[Any] , a_ : Optional[Any] , a_ : Union[str, Any]=7 , a_ : Any=400 , a_ : List[Any]=2000 , a_ : Tuple=1 , a_ : Optional[int]=0.0 , a_ : Optional[Any]=1_6000 , a_ : str=True , a_ : Union[str, Any]=80 , a_ : Dict=16 , a_ : Tuple=64 , a_ : Any="hann_window" , a_ : Union[str, Any]=80 , a_ : List[Any]=7600 , a_ : Optional[Any]=1e-1_0 , a_ : Dict=True , )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = parent
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size
SCREAMING_SNAKE_CASE__ : str = min_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = max_seq_length
SCREAMING_SNAKE_CASE__ : str = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE__ : int = feature_size
SCREAMING_SNAKE_CASE__ : str = padding_value
SCREAMING_SNAKE_CASE__ : Any = sampling_rate
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : int = num_mel_bins
SCREAMING_SNAKE_CASE__ : int = hop_length
SCREAMING_SNAKE_CASE__ : str = win_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = win_function
SCREAMING_SNAKE_CASE__ : List[str] = fmin
SCREAMING_SNAKE_CASE__ : Dict = fmax
SCREAMING_SNAKE_CASE__ : int = mel_floor
SCREAMING_SNAKE_CASE__ : Tuple = return_attention_mask
def __lowercase( self : Dict )-> Dict:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def __lowercase( self : List[Any] , a_ : str=False , a_ : List[Any]=False )-> Optional[Any]:
"""simple docstring"""
def _flatten(a_ : int ):
return list(itertools.chain(*a_ ) )
if equal_length:
SCREAMING_SNAKE_CASE__ : Tuple = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE__ : Optional[int] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE__ : int = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
def __lowercase( self : Any , a_ : int=False , a_ : Any=False )-> Union[str, Any]:
"""simple docstring"""
if equal_length:
SCREAMING_SNAKE_CASE__ : str = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE__ : Tuple = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE__ : List[str] = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = SpeechTaFeatureExtractor
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = SpeechTaFeatureExtractionTester(self )
def __lowercase( self : Any , a_ : Optional[int] )-> List[str]:
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_ , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_ , axis=0 ) - 1 ) < 1e-3 ) )
def __lowercase( self : Tuple )-> Dict:
"""simple docstring"""
# Tests that all calls wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE__ : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : Optional[int] = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE__ : List[Any] = feat_extract(a_ , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : List[str] = feat_extract(a_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : int = ['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE__ : Tuple = [None, 1600, None]
for max_length, padding in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE__ : str = feat_extract(a_ , padding=a_ , max_length=a_ , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __lowercase( self : List[Any] )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : List[Any] = range(800 , 1400 , 200 )
SCREAMING_SNAKE_CASE__ : int = [floats_list((1, x) )[0] for x in lengths]
SCREAMING_SNAKE_CASE__ : int = ['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [None, 1600, None]
for max_length, padding in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE__ : List[str] = feat_extract(a_ , max_length=a_ , padding=a_ )
SCREAMING_SNAKE_CASE__ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __lowercase( self : int )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract(
a_ , truncation=a_ , max_length=1000 , padding='max_length' , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __lowercase( self : Optional[Any] )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : List[str] = feat_extract(
a_ , truncation=a_ , max_length=1000 , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
SCREAMING_SNAKE_CASE__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : str = feat_extract(
a_ , truncation=a_ , max_length=2000 , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def __lowercase( self : Any )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.random.rand(100 ).astype(np.floataa )
SCREAMING_SNAKE_CASE__ : int = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE__ : Any = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
SCREAMING_SNAKE_CASE__ : Tuple = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __lowercase( self : Any )-> Optional[int]:
"""simple docstring"""
# Tests that all calls wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : Dict = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
SCREAMING_SNAKE_CASE__ : Optional[int] = feature_extractor(audio_target=a_ , padding=a_ , return_tensors='np' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : int = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE__ : Optional[Any] = feature_extractor(a_ , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : Optional[Any] = feature_extractor(a_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE__ : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE__ : List[str] = np.asarray(a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = feature_extractor(a_ , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : str = feature_extractor(a_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
def __lowercase( self : Dict )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Any = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(a_ ) == len(a_ ) for x, y in zip(a_ , processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE__ : str = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a_ )
SCREAMING_SNAKE_CASE__ : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
SCREAMING_SNAKE_CASE__ : List[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE__ : int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a_ )
SCREAMING_SNAKE_CASE__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Any = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE__ : Optional[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __lowercase( self : Tuple )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : Dict = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ : str = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : List[Any] = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.pad(a_ , padding='longest' , return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE__ : Any = feat_extract.pad(a_ , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def __lowercase( self : Any )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.feat_extract_dict
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feature_extraction_class(**a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ : Any = [len(a_ ) for x in speech_inputs]
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE__ : Any = feat_extract.pad(a_ , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , a_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , a_ )
def __lowercase( self : str )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.feat_extract_dict
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feature_extraction_class(**a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ : Tuple = [len(a_ ) for x in speech_inputs]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Dict = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ : str = min(a_ )
SCREAMING_SNAKE_CASE__ : Any = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE__ : int = feat_extract.pad(
a_ , padding='max_length' , max_length=a_ , truncation=a_ , return_tensors='np' )
self.assertIn('attention_mask' , a_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def __lowercase( self : Optional[int] , a_ : List[str] )-> Any:
"""simple docstring"""
from datasets import load_dataset
SCREAMING_SNAKE_CASE__ : int = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE__ : List[Any] = ds.sort('id' ).select(range(a_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def __lowercase( self : List[str] )-> List[Any]:
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor(
[2.3_8_0_4e-0_3, 2.0_7_5_2e-0_3, 1.9_8_3_6e-0_3, 2.1_0_5_7e-0_3, 1.6_1_7_4e-0_3,
3.0_5_1_8e-0_4, 9.1_5_5_3e-0_5, 3.3_5_6_9e-0_4, 9.7_6_5_6e-0_4, 1.8_3_1_1e-0_3,
2.0_1_4_2e-0_3, 2.1_0_5_7e-0_3, 1.7_3_9_5e-0_3, 4.5_7_7_6e-0_4, -3.9_6_7_3e-0_4,
4.5_7_7_6e-0_4, 1.0_0_7_1e-0_3, 9.1_5_5_3e-0_5, 4.8_8_2_8e-0_4, 1.1_5_9_7e-0_3,
7.3_2_4_2e-0_4, 9.4_6_0_4e-0_4, 1.8_0_0_5e-0_3, 1.8_3_1_1e-0_3, 8.8_5_0_1e-0_4,
4.2_7_2_5e-0_4, 4.8_8_2_8e-0_4, 7.3_2_4_2e-0_4, 1.0_9_8_6e-0_3, 2.1_0_5_7e-0_3] )
# fmt: on
SCREAMING_SNAKE_CASE__ : List[str] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : List[str] = feature_extractor(a_ , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 9_3680) )
self.assertTrue(torch.allclose(input_values[0, :30] , a_ , atol=1e-6 ) )
def __lowercase( self : Tuple )-> List[Any]:
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE__ : int = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : str = feature_extractor(audio_target=a_ , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , a_ , atol=1e-4 ) )
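# Added for clarity: a hedged usage sketch of what the tests above exercise. The class is
# rendered as SpeechTaFeatureExtractor in this snippet; in the transformers library the
# corresponding class is SpeechT5FeatureExtractor (16 kHz, 80 mel bins by default). Plain
# waveform input yields padded samples, audio_target= yields a log-mel spectrogram.
import numpy as np
from transformers import SpeechT5FeatureExtractor

extractor = SpeechT5FeatureExtractor()
waveform = np.random.randn(16000).astype(np.float32)
inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
targets = extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np")
print(inputs.input_values.shape)   # (1, num_samples)
print(targets.input_values.shape)  # (1, num_frames, 80)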
| 636 | 0 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class snake_case :
lowercase_ = None
def __lowercase( self : Optional[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : Dict = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , lowerCAmelCase__ )
def __lowercase( self : int )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(lowerCAmelCase__ , 'feat_extract.json' )
feat_extract_first.to_json_file(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE__ : Dict = self.feature_extraction_class.from_json_file(lowerCAmelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __lowercase( self : List[str] )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : int = feat_extract_first.save_pretrained(lowerCAmelCase__ )[0]
check_json_file_has_correct_format(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE__ : str = self.feature_extraction_class.from_pretrained(lowerCAmelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __lowercase( self : Tuple )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class()
self.assertIsNotNone(lowerCAmelCase__ )
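# Added for clarity: the mixin above round-trips a feature extractor through JSON and through
# save_pretrained. A minimal standalone sketch of the same round trip, using
# Wav2Vec2FeatureExtractor purely as a concrete stand-in for self.feature_extraction_class.
import os
import tempfile
from transformers import Wav2Vec2FeatureExtractor

feat_extract = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000)
with tempfile.TemporaryDirectory() as tmpdir:
    json_path = os.path.join(tmpdir, "feat_extract.json")
    feat_extract.to_json_file(json_path)
    reloaded = Wav2Vec2FeatureExtractor.from_json_file(json_path)
    assert reloaded.to_dict() == feat_extract.to_dict()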
| 709 | import math
import sys
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = ''
try:
with open(lowercase__ , 'rb' ) as binary_file:
SCREAMING_SNAKE_CASE__ : Tuple = binary_file.read()
for dat in data:
SCREAMING_SNAKE_CASE__ : Tuple = f'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = {'0': '0', '1': '1'}
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = '', ''
SCREAMING_SNAKE_CASE__ : Tuple = len(lowercase__ )
for i in range(len(lowercase__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
SCREAMING_SNAKE_CASE__ : int = lexicon[curr_string]
result += last_match_id
SCREAMING_SNAKE_CASE__ : str = last_match_id + '0'
if math.loga(lowercase__ ).is_integer():
SCREAMING_SNAKE_CASE__ : List[str] = {}
for curr_key in list(lowercase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = lexicon.pop(lowercase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = new_lex
SCREAMING_SNAKE_CASE__ : Any = last_match_id + '1'
index += 1
SCREAMING_SNAKE_CASE__ : Tuple = ''
return result
def _a ( lowercase__ : str , lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = 8
try:
with open(lowercase__ , 'wb' ) as opened_file:
SCREAMING_SNAKE_CASE__ : Dict = [
to_write[i : i + byte_length]
for i in range(0 , len(lowercase__ ) , lowercase__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(lowercase__ , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
SCREAMING_SNAKE_CASE__ : Optional[int] = data_bits[counter:]
SCREAMING_SNAKE_CASE__ : int = data_bits[counter + 1 :]
return data_bits
def _a ( lowercase__ : str , lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = read_file_binary(lowercase__ )
SCREAMING_SNAKE_CASE__ : Dict = remove_prefix(lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = decompress_data(lowercase__ )
write_file_binary(lowercase__ , lowercase__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
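# Added for clarity: the helpers above read a file into a bit string, strip a length prefix,
# and expand it with a bit-level LZW variant. For reference, a minimal sketch of the classic
# code-based LZW decode (a related but simpler scheme; lzw_decode is a made-up helper name).
def lzw_decode(codes: list[int]) -> bytes:
    # dictionary is seeded with all single bytes and grown as codes arrive
    table = {i: bytes([i]) for i in range(256)}
    prev = table[codes[0]]
    out = [prev]
    for code in codes[1:]:
        if code in table:
            entry = table[code]
        elif code == len(table):  # the "cScSc" corner case: code not yet in the table
            entry = prev + prev[:1]
        else:
            raise ValueError("invalid LZW code")
        out.append(entry)
        table[len(table)] = prev + entry[:1]
        prev = entry
    return b"".join(out)

assert lzw_decode([65, 66, 256, 258]) == b"ABABABA"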
| 636 | 0 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 710 | def _a ( lowercase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : List[Any] = set({'(', '[', '{'} )
SCREAMING_SNAKE_CASE__ : Optional[int] = set({')', ']', '}'} )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'{': '}', '[': ']', '(': ')'}
for i in range(len(lowercase__ ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(lowercase__ ) == 0 or (len(lowercase__ ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(lowercase__ ) == 0
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = input('Enter sequence of brackets: ' )
if is_balanced(lowercase__ ):
print(lowercase__ , 'is balanced' )
else:
print(lowercase__ , 'is not balanced' )
if __name__ == "__main__":
main()
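# Added for clarity: a compact, self-checking sketch of the same stack-based bracket matcher
# (brackets_balanced is a made-up helper name, not part of the snippet above).
def brackets_balanced(s: str) -> bool:
    pairs = {")": "(", "]": "[", "}": "{"}
    stack = []
    for ch in s:
        if ch in "([{":
            stack.append(ch)
        elif ch in pairs and (not stack or stack.pop() != pairs[ch]):
            return False
    return not stack

assert brackets_balanced("{[()]}") is True
assert brackets_balanced("([)]") is False
assert brackets_balanced("((") is False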
| 636 | 0 |
def _a ( lowercase__ : Optional[int] = 1_00 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = (n * (n + 1) // 2) ** 2
SCREAMING_SNAKE_CASE__ : List[str] = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 711 | import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ : List[Any] = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = PegasusTokenizer
lowercase_ = PegasusTokenizerFast
lowercase_ = True
lowercase_ = True
def __lowercase( self : int )-> List[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ : List[Any] = PegasusTokenizer(a_ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowercase( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def __lowercase( self : Any , **a_ : Optional[Any] )-> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **a_ )
def __lowercase( self : Union[str, Any] , a_ : List[Any] )-> Optional[int]:
"""simple docstring"""
return ("This is a test", "This is a test")
def __lowercase( self : Optional[int] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = '</s>'
SCREAMING_SNAKE_CASE__ : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def __lowercase( self : Dict )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
self.assertEqual(len(a_ ) , 1103 )
def __lowercase( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def __lowercase( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Tuple = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
SCREAMING_SNAKE_CASE__ : List[str] = rust_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = py_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0]
self.assertListEqual(a_ , a_ )
def __lowercase( self : Any )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
SCREAMING_SNAKE_CASE__ : Any = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
SCREAMING_SNAKE_CASE__ : List[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer([raw_input_str] , return_tensors=a_ ).input_ids[0]
self.assertListEqual(a_ , a_ )
def __lowercase( self : int )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
SCREAMING_SNAKE_CASE__ : int = 'To ensure a smooth flow of bank resolutions.'
SCREAMING_SNAKE_CASE__ : List[Any] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer([raw_input_str] , return_tensors=a_ ).input_ids[0]
self.assertListEqual(a_ , a_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def __lowercase( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = ['This is going to be way too long.' * 150, 'short example']
SCREAMING_SNAKE_CASE__ : int = ['not super long but more than 5 tokens', 'tiny']
SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer(a_ , padding=a_ , truncation=a_ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : Optional[int] = self._large_tokenizer(
text_target=a_ , max_length=5 , padding=a_ , truncation=a_ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(a_ ) == 2 # input_ids, attention_mask.
@slow
def __lowercase( self : Any )-> str:
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE__ : Optional[int] = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = PegasusTokenizer
lowercase_ = PegasusTokenizerFast
lowercase_ = True
lowercase_ = True
def __lowercase( self : Any )-> Union[str, Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ : Optional[int] = PegasusTokenizer(a_ , offset=0 , mask_token_sent=a_ , mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowercase( self : Optional[Any] )-> List[str]:
"""simple docstring"""
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def __lowercase( self : List[str] , **a_ : Optional[Any] )-> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **a_ )
def __lowercase( self : Optional[Any] , a_ : Tuple )-> str:
"""simple docstring"""
return ("This is a test", "This is a test")
def __lowercase( self : str )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Tuple = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
SCREAMING_SNAKE_CASE__ : str = rust_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0]
SCREAMING_SNAKE_CASE__ : str = py_tokenizer([raw_input_str] , return_tensors=a_ , add_special_tokens=a_ ).input_ids[0]
self.assertListEqual(a_ , a_ )
@require_torch
def __lowercase( self : List[str] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = ['This is going to be way too long.' * 1000, 'short example']
SCREAMING_SNAKE_CASE__ : Optional[int] = ['not super long but more than 5 tokens', 'tiny']
SCREAMING_SNAKE_CASE__ : str = self._large_tokenizer(a_ , padding=a_ , truncation=a_ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : int = self._large_tokenizer(
text_target=a_ , max_length=5 , padding=a_ , truncation=a_ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(a_ ) == 2 # input_ids, attention_mask.
def __lowercase( self : Dict )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._large_tokenizer(a_ ).input_ids
self.assertListEqual(
a_ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
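# Added for clarity: a hedged sketch of the tokenizer behaviour the tests above pin down
# (offset vocabulary, <mask_1>/<mask_2> sentence and word masks, trailing </s>). Requires the
# sentencepiece backend and downloads the google/pegasus-large checkpoint, so treat as illustrative.
from transformers import PegasusTokenizer

tok = PegasusTokenizer.from_pretrained("google/pegasus-large")
ids = tok("To ensure a smooth flow of bank resolutions.").input_ids
print(ids[-1] == tok.eos_token_id)              # sequences end with </s> (id 1)
print(tok.convert_ids_to_tokens([0, 1, 2, 3]))  # ['<pad>', '</s>', '<mask_1>', '<mask_2>']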
| 636 | 0 |
def _a ( lowercase__ : Dict ):
'''simple docstring'''
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
SCREAMING_SNAKE_CASE__ : Dict = int(input("Enter number: ").strip())
print(F"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
| 712 | def _a ( lowercase__ : int = 1_00_00_00 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , lowercase__ ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
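# Added for clarity: the sieve above accumulates Euler's totient phi(d) for 2 <= d <= limit,
# i.e. the number of reduced proper fractions n/d (Project Euler 72); in the standard form of
# the sieve the inner loop steps by i. A direct gcd count (made-up helper name) cross-checks
# small limits against the value quoted in the problem statement.
from math import gcd

def count_reduced_fractions(limit: int) -> int:
    return sum(1 for d in range(2, limit + 1) for n in range(1, d) if gcd(n, d) == 1)

assert count_reduced_fractions(8) == 21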
| 636 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class snake_case ( unittest.TestCase ):
def __lowercase( self : Any )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = tf.convert_to_tensor(
[
[
8.222_0991, # 3rd highest value; idx. 0
-0.562_0044,
5.2322_9752,
4.038_6393,
-6.879_8378,
-0.5478_5802,
-3.201_2153,
2.9277_7176,
1.8817_1953,
7.3534_1276, # 5th highest value; idx. 9
8.4320_7833, # 2nd highest value; idx. 10
-9.8571_1836,
-5.9620_9236,
-1.1303_9161,
-7.111_5294,
-0.836_9633,
-5.318_6408,
7.0642_7407,
0.8136_9344,
-0.8202_3817,
-5.917_9796,
0.5881_3443,
-6.9977_8438,
4.7155_1189,
-0.1877_1637,
7.4402_0759, # 4th highest value; idx. 25
9.3845_0987, # 1st highest value; idx. 26
2.1266_2941,
-9.3256_2038,
2.3565_2522,
], # cumulative prob of 5 highest values <= 0.6
[
0.5842_5518,
4.5313_9238,
-5.5751_0464,
-6.2803_0699,
-7.1952_9503,
-4.0212_2551,
1.3933_7037,
-6.0670_7057,
1.5948_0517,
-9.64_3119,
0.0390_7799,
0.6723_1762,
-8.8820_6726,
6.2711_5922, # 4th highest value; idx. 13
2.2852_0723,
4.8276_7506,
4.3042_1368,
8.827_5313, # 2nd highest value; idx. 17
5.4402_9958, # 5th highest value; idx. 18
-4.473_5794,
7.3857_9536, # 3rd highest value; idx. 20
-2.9105_1663,
2.6194_6077,
-2.567_4762,
-9.4895_9302,
-4.0292_2645,
-1.3541_6918,
9.6770_2323, # 1st highest value; idx. 27
-5.8947_8553,
1.8537_0467,
], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
SCREAMING_SNAKE_CASE__ : Dict = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.convert_to_tensor(
[8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.floataa , ) # expected non filtered values as noted above
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf_top_k_top_p_filtering(_SCREAMING_SNAKE_CASE , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
SCREAMING_SNAKE_CASE__ : Any = output[output != -float('inf' )]
SCREAMING_SNAKE_CASE__ : int = tf.cast(
tf.where(tf.not_equal(_SCREAMING_SNAKE_CASE , tf.constant(-float('inf' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rtol=1e-1_2 )
tf.debugging.assert_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
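# Added for clarity: a minimal NumPy sketch of the filtering idea the assertions above exercise
# (top-k then nucleus/top-p on a single logits row). Simplified and not the TF implementation;
# top_k_top_p_filter is a made-up helper name.
import numpy as np

def top_k_top_p_filter(logits, top_k=0, top_p=1.0):
    logits = np.array(logits, dtype=np.float64)
    if top_k > 0:
        kth_largest = np.sort(logits)[-top_k]
        logits[logits < kth_largest] = -np.inf        # keep only the top_k scores
    if top_p < 1.0:
        order = np.argsort(logits)[::-1]              # indices sorted by descending score
        probs = np.exp(logits[order] - logits[order][0])
        probs /= probs.sum()
        remove = np.cumsum(probs) > top_p
        remove[1:] = remove[:-1].copy()               # keep the first token that crosses top_p
        remove[0] = False
        logits[order[remove]] = -np.inf
    return logits

print(top_k_top_p_filter([2.0, 1.0, 0.5, -1.0], top_k=3, top_p=0.9))  # only the lowest score is masked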
@require_tf
class snake_case ( unittest.TestCase , UpperCamelCase_ ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
lowercase_ = {
'AutoModelForCausalLM': TFAutoModelForCausalLM,
'AutoModelForSpeechSeq2Seq': TFAutoModelForSpeechSeqaSeq,
'AutoModelForSeq2SeqLM': TFAutoModelForSeqaSeqLM,
'AutoModelForVision2Seq': TFAutoModelForVisionaSeq,
'LogitsProcessorList': TFLogitsProcessorList,
'MinLengthLogitsProcessor': TFMinLengthLogitsProcessor,
'create_tensor_fn': tf.convert_to_tensor,
'floats_tensor': floats_tensor,
'return_tensors': 'tf',
}
@slow
def __lowercase( self : Dict )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
SCREAMING_SNAKE_CASE__ : Dict = 2
SCREAMING_SNAKE_CASE__ : int = 2
class snake_case ( tf.Module ):
def __init__( self : List[str] , a_ : List[Any] )-> Any:
"""simple docstring"""
super(_SCREAMING_SNAKE_CASE , self ).__init__()
SCREAMING_SNAKE_CASE__ : str = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='input_ids' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='attention_mask' ),
) , jit_compile=_SCREAMING_SNAKE_CASE , )
def __lowercase( self : List[Any] , a_ : Optional[Any] , a_ : List[str] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model.generate(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , max_new_tokens=_SCREAMING_SNAKE_CASE , return_dict_in_generate=_SCREAMING_SNAKE_CASE , )
return {"sequences": outputs["sequences"]}
SCREAMING_SNAKE_CASE__ : Any = [[2, 0], [102, 103]]
SCREAMING_SNAKE_CASE__ : int = [[1, 0], [1, 1]]
SCREAMING_SNAKE_CASE__ : List[Any] = DummyModel(model=_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , signatures={'serving_default': dummy_model.serving} )
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.saved_model.load(_SCREAMING_SNAKE_CASE ).signatures['serving_default']
for batch_size in range(1 , len(_SCREAMING_SNAKE_CASE ) + 1 ):
SCREAMING_SNAKE_CASE__ : List[str] = {
'input_ids': tf.constant(dummy_input_ids[:batch_size] ),
'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ),
}
SCREAMING_SNAKE_CASE__ : Optional[Any] = serving_func(**_SCREAMING_SNAKE_CASE )['sequences']
SCREAMING_SNAKE_CASE__ : Any = test_model.generate(**_SCREAMING_SNAKE_CASE , max_new_tokens=_SCREAMING_SNAKE_CASE )
tf.debugging.assert_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowercase( self : List[str] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
SCREAMING_SNAKE_CASE__ : Any = 1
SCREAMING_SNAKE_CASE__ : str = 2
class snake_case ( tf.Module ):
def __init__( self : List[Any] , a_ : int )-> Dict:
"""simple docstring"""
super(_SCREAMING_SNAKE_CASE , self ).__init__()
SCREAMING_SNAKE_CASE__ : List[Any] = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='input_ids' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='attention_mask' ),
) , jit_compile=_SCREAMING_SNAKE_CASE , )
def __lowercase( self : Any , a_ : Dict , a_ : int )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model.generate(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , max_new_tokens=_SCREAMING_SNAKE_CASE , return_dict_in_generate=_SCREAMING_SNAKE_CASE , )
return {"sequences": outputs["sequences"]}
SCREAMING_SNAKE_CASE__ : Optional[Any] = [[2], [102, 103]]
SCREAMING_SNAKE_CASE__ : str = [[1], [1, 1]]
SCREAMING_SNAKE_CASE__ : List[str] = DummyModel(model=_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , signatures={'serving_default': dummy_model.serving} )
SCREAMING_SNAKE_CASE__ : Dict = tf.saved_model.load(_SCREAMING_SNAKE_CASE ).signatures['serving_default']
for input_row in range(len(_SCREAMING_SNAKE_CASE ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'input_ids': tf.constant([dummy_input_ids[input_row]] ),
'attention_mask': tf.constant([dummy_attention_masks[input_row]] ),
}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = serving_func(**_SCREAMING_SNAKE_CASE )['sequences']
SCREAMING_SNAKE_CASE__ : Optional[int] = test_model.generate(**_SCREAMING_SNAKE_CASE , max_new_tokens=_SCREAMING_SNAKE_CASE )
tf.debugging.assert_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
@require_tensorflow_text
def __lowercase( self : int )-> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=_SCREAMING_SNAKE_CASE )
class snake_case ( tf.keras.layers.Layer ):
def __init__( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : int = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_SCREAMING_SNAKE_CASE , 'spiece.model' ) , 'rb' ).read() )
SCREAMING_SNAKE_CASE__ : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5' )
def __lowercase( self : Dict , a_ : Optional[Any] , *a_ : int , **a_ : str )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = text.pad_model_inputs(
_SCREAMING_SNAKE_CASE , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model.generate(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
return self.tokenizer.detokenize(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ : Optional[int] = CompleteSentenceTransformer()
SCREAMING_SNAKE_CASE__ : Dict = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs' )
SCREAMING_SNAKE_CASE__ : List[str] = complete_model(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ : Dict = tf.keras.Model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
keras_model.save(_SCREAMING_SNAKE_CASE )
def __lowercase( self : List[Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = {
'do_sample': True,
'num_beams': 1,
'top_p': 0.7,
'top_k': 10,
'temperature': 0.7,
}
SCREAMING_SNAKE_CASE__ : Tuple = 14
SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
SCREAMING_SNAKE_CASE__ : str = 'Hello, my dog is cute and'
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors='tf' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE__ : int = model.generate(**_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
self.assertTrue(expectation == len(generated_tokens[0] ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [638, 198]
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE__ : List[str] = model.generate(**_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart' )
SCREAMING_SNAKE_CASE__ : str = 'Hugging Face is a technology company based in New York and Paris.'
SCREAMING_SNAKE_CASE__ : str = bart_tokenizer(_SCREAMING_SNAKE_CASE , return_tensors='tf' ).input_ids
SCREAMING_SNAKE_CASE__ : str = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart' )
SCREAMING_SNAKE_CASE__ : Any = bart_model.generate(_SCREAMING_SNAKE_CASE ).numpy()
class snake_case ( UpperCamelCase_ ):
def __lowercase( self : Union[str, Any] , a_ : str , a_ : Optional[Any]=None , **a_ : Optional[int] )-> List[str]:
"""simple docstring"""
return super().call(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ : List[str] = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = bart_model.generate(_SCREAMING_SNAKE_CASE , foo='bar' ).numpy()
self.assertTrue(np.array_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
class snake_case ( bart_model.model.encoder.__class__ ):
def __lowercase( self : str , a_ : Union[str, Any] , **a_ : Optional[int] )-> Optional[int]:
"""simple docstring"""
return super().call(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ : str = FakeEncoder(bart_model.config , bart_model.model.shared )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
SCREAMING_SNAKE_CASE__ : Optional[Any] = bart_model.generate(_SCREAMING_SNAKE_CASE ).numpy()
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_SCREAMING_SNAKE_CASE , foo='bar' )
| 713 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
def _a ( lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any]=False , lowercase__ : str=False , lowercase__ : Dict=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def _a ( lowercase__ : List[str] , lowercase__ : Dict ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
SCREAMING_SNAKE_CASE__ : Dict = 'vilt.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.weight''' )
SCREAMING_SNAKE_CASE__ : Any = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE__ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE__ : Tuple = in_proj_bias[-config.hidden_size :]
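# Added for clarity: the function above slices a fused qkv projection from the original
# checkpoint into separate query/key/value tensors. A standalone sketch of that slicing
# pattern (split_fused_qkv is a made-up helper name).
import torch

def split_fused_qkv(in_proj_weight: torch.Tensor, in_proj_bias: torch.Tensor, hidden_size: int):
    # the fused weight has shape (3 * hidden_size, hidden_size): rows are [query; key; value]
    q_w, k_w, v_w = in_proj_weight.split(hidden_size, dim=0)
    q_b, k_b, v_b = in_proj_bias.split(hidden_size, dim=0)
    return (q_w, q_b), (k_w, k_b), (v_w, v_b)

(q_w, q_b), (k_w, k_b), (v_w, v_b) = split_fused_qkv(torch.randn(3 * 768, 768), torch.randn(3 * 768), 768)
assert q_w.shape == (768, 768) and v_b.shape == (768,)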
def _a ( lowercase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
def _a ( lowercase__ : int , lowercase__ : int , lowercase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = dct.pop(lowercase__ )
SCREAMING_SNAKE_CASE__ : Any = val
@torch.no_grad()
def _a ( lowercase__ : Dict , lowercase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = ViltConfig(image_size=3_84 , patch_size=32 , tie_word_embeddings=lowercase__ )
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : str = False
if "vqa" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Any = True
SCREAMING_SNAKE_CASE__ : str = 31_29
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'huggingface/label-files'
SCREAMING_SNAKE_CASE__ : int = 'vqa2-id2label.json'
SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {int(lowercase__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Dict = idalabel
SCREAMING_SNAKE_CASE__ : str = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : List[str] = ViltForQuestionAnswering(lowercase__ )
elif "nlvr" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Optional[int] = True
SCREAMING_SNAKE_CASE__ : List[str] = 2
SCREAMING_SNAKE_CASE__ : Dict = {0: 'False', 1: 'True'}
SCREAMING_SNAKE_CASE__ : Dict = {v: k for k, v in config.idalabel.items()}
SCREAMING_SNAKE_CASE__ : Tuple = 3
SCREAMING_SNAKE_CASE__ : int = ViltForImagesAndTextClassification(lowercase__ )
elif "irtr" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : str = ViltForImageAndTextRetrieval(lowercase__ )
elif "mlm_itm" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : int = True
SCREAMING_SNAKE_CASE__ : Optional[int] = ViltForMaskedLM(lowercase__ )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE__ : Any = torch.hub.load_state_dict_from_url(lowercase__ , map_location='cpu' )['state_dict']
SCREAMING_SNAKE_CASE__ : Any = create_rename_keys(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
read_in_q_k_v(lowercase__ , lowercase__ )
if mlm_model or irtr_model:
SCREAMING_SNAKE_CASE__ : Any = ['itm_score.fc.weight', 'itm_score.fc.bias']
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = model.load_state_dict(lowercase__ , strict=lowercase__ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(lowercase__ )
# Define processor
SCREAMING_SNAKE_CASE__ : str = ViltImageProcessor(size=3_84 )
SCREAMING_SNAKE_CASE__ : List[Any] = BertTokenizer.from_pretrained('bert-base-uncased' )
SCREAMING_SNAKE_CASE__ : List[Any] = ViltProcessor(lowercase__ , lowercase__ )
# Forward pass on example inputs (image + text)
if nlvr_model:
SCREAMING_SNAKE_CASE__ : List[str] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=lowercase__ ).raw )
SCREAMING_SNAKE_CASE__ : Any = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=lowercase__ ).raw )
SCREAMING_SNAKE_CASE__ : Tuple = (
'The left image contains twice the number of dogs as the right image, and at least two dogs in total are'
' standing.'
)
SCREAMING_SNAKE_CASE__ : List[Any] = processor(lowercase__ , lowercase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : List[str] = processor(lowercase__ , lowercase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : List[Any] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
SCREAMING_SNAKE_CASE__ : Tuple = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=lowercase__ ).raw )
if mlm_model:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'a bunch of [MASK] laying on a [MASK].'
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'How many cats are there?'
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor(lowercase__ , lowercase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : str = model(**lowercase__ )
# Verify outputs
if mlm_model:
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Size([1, 11, 3_05_22] )
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , lowercase__ , atol=1E-4 )
# verify masked token prediction equals "cats"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
SCREAMING_SNAKE_CASE__ : str = torch.Size([1, 31_29] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([-15.9495, -18.1472, -10.3041] )
assert torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , lowercase__ , atol=1E-4 )
# verify vqa prediction equals "2"
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.Size([1, 2] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([-2.8721, 2.1291] )
assert torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase__ )
processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 636 | 0 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class snake_case ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
@register_to_config
def __init__( self : Dict , a_ : int , a_ : int , a_ : int , a_ : float , a_ : int , a_ : int , a_ : int , a_ : int , a_ : str , a_ : bool = False , )-> Optional[int]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : Tuple = nn.Embedding(a_ , a_ )
SCREAMING_SNAKE_CASE__ : List[str] = nn.Embedding(a_ , a_ )
SCREAMING_SNAKE_CASE__ : Dict = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = nn.Dropout(p=a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = TaConfig(
vocab_size=a_ , d_model=a_ , num_heads=a_ , d_kv=a_ , d_ff=a_ , dropout_rate=a_ , feed_forward_proj=a_ , is_decoder=a_ , is_encoder_decoder=a_ , )
SCREAMING_SNAKE_CASE__ : Optional[int] = nn.ModuleList()
for lyr_num in range(a_ ):
SCREAMING_SNAKE_CASE__ : int = TaBlock(a_ )
self.encoders.append(a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = TaLayerNorm(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = nn.Dropout(p=a_ )
def __lowercase( self : int , a_ : int , a_ : Dict )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.token_embedder(a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = encoder_input_tokens.shape[1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.arange(a_ , device=encoder_input_tokens.device )
x += self.position_encoding(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = self.dropout_pre(a_ )
# invert the attention mask
SCREAMING_SNAKE_CASE__ : List[Any] = encoder_input_tokens.size()
SCREAMING_SNAKE_CASE__ : Dict = self.get_extended_attention_mask(a_ , a_ )
for lyr in self.encoders:
SCREAMING_SNAKE_CASE__ : Dict = lyr(a_ , a_ )[0]
SCREAMING_SNAKE_CASE__ : int = self.layer_norm(a_ )
return self.dropout_post(a_ ), encoder_inputs_mask
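# Added for clarity: the forward pass above relies on ModuleUtilsMixin.get_extended_attention_mask
# to turn a (batch, seq) padding mask into an additive bias on attention scores. A minimal
# encoder-only sketch of that conversion (no causal masking).
import torch

def extended_attention_mask(mask: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
    # (batch, seq) with 1 = attend, 0 = pad  ->  (batch, 1, 1, seq) additive mask
    ext = mask[:, None, None, :].to(dtype)
    return (1.0 - ext) * torch.finfo(dtype).min

print(extended_attention_mask(torch.tensor([[1, 1, 1, 0]])))  # zeros where attended, large negative at the pad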
| 714 | from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class snake_case :
lowercase_ = 42 # [batch_size x 3]
lowercase_ = 42 # [batch_size x 3]
lowercase_ = 42 # [batch_size x 3]
lowercase_ = 42 # [batch_size x 3]
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
def __lowercase( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def __lowercase( self : Dict )-> Tuple:
"""simple docstring"""
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def __lowercase( self : Dict )-> Union[str, Any]:
"""simple docstring"""
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def __lowercase( self : Tuple )-> torch.Tensor:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = torch.arange(self.height * self.width )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.stack(
[
pixel_indices % self.width,
torch.div(a_ , self.width , rounding_mode='trunc' ),
] , axis=1 , )
return coords
@property
def __lowercase( self : Any )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.shape
SCREAMING_SNAKE_CASE__ : Tuple = int(np.prod(a_ ) )
SCREAMING_SNAKE_CASE__ : List[str] = self.get_image_coords()
SCREAMING_SNAKE_CASE__ : Dict = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
SCREAMING_SNAKE_CASE__ : Any = self.get_camera_rays(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = rays.view(a_ , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def __lowercase( self : Optional[Any] , a_ : torch.Tensor )-> torch.Tensor:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
SCREAMING_SNAKE_CASE__ : str = coords.view(a_ , -1 , 2 )
SCREAMING_SNAKE_CASE__ : List[Any] = self.resolution()
SCREAMING_SNAKE_CASE__ : str = self.fov()
SCREAMING_SNAKE_CASE__ : Any = (flat.float() / (res - 1)) * 2 - 1
SCREAMING_SNAKE_CASE__ : Any = fracs * torch.tan(fov / 2 )
SCREAMING_SNAKE_CASE__ : List[str] = fracs.view(a_ , -1 , 2 )
SCREAMING_SNAKE_CASE__ : str = (
self.z.view(a_ , 1 , 3 )
+ self.x.view(a_ , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(a_ , 1 , 3 ) * fracs[:, :, 1:]
)
SCREAMING_SNAKE_CASE__ : Tuple = directions / directions.norm(dim=-1 , keepdim=a_ )
SCREAMING_SNAKE_CASE__ : Any = torch.stack(
[
torch.broadcast_to(self.origin.view(a_ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(a_ , *a_ , 2 , 3 )
def __lowercase( self : Optional[int] , a_ : int , a_ : int )-> "DifferentiableProjectiveCamera":
"""simple docstring"""
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=a_ , height=a_ , x_fov=self.x_fov , y_fov=self.y_fov , )
def _a ( lowercase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : str = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([np.sin(lowercase__ ), np.cos(lowercase__ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
SCREAMING_SNAKE_CASE__ : Tuple = -z * 4
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([np.cos(lowercase__ ), -np.sin(lowercase__ ), 0.0] )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.cross(lowercase__ , lowercase__ )
origins.append(lowercase__ )
xs.append(lowercase__ )
ys.append(lowercase__ )
zs.append(lowercase__ )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(lowercase__ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowercase__ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowercase__ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowercase__ , axis=0 ) ).float() , width=lowercase__ , height=lowercase__ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowercase__ )) , )
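# Added for clarity: get_camera_rays above maps pixel coordinates to world-space ray directions
# (pixel -> [-1, 1] fractions, scaled by tan(fov / 2), mixed with the camera's x/y/z axes, then
# normalized). A scalar sketch of the same mapping for one pixel (made-up helper name, NumPy for brevity).
import numpy as np

def pixel_to_ray_direction(px, py, width, height, x_fov, y_fov, x_axis, y_axis, z_axis):
    fx = (px / (width - 1) * 2 - 1) * np.tan(x_fov / 2)
    fy = (py / (height - 1) * 2 - 1) * np.tan(y_fov / 2)
    direction = z_axis + fx * x_axis + fy * y_axis
    return direction / np.linalg.norm(direction)

d = pixel_to_ray_direction(32, 32, 64, 64, 0.7, 0.7,
                           np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]), np.array([0.0, 0.0, 1.0]))
print(d)  # close to the optical axis (the z axis) for the near-central pixel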
| 636 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", F"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", F"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", F"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", F"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.weight""", F"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", F"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", F"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", F"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.weight""", F"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", F"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", F"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", F"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", F"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", F"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.bias""", F"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", F"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", F"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", F"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.bias""", F"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", F"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def _a ( lowercase__ : Optional[int] , lowercase__ : Dict , lowercase__ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = state_dict.pop(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE__ : List[Any] = val
def _a ( lowercase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
SCREAMING_SNAKE_CASE__ : Any = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
SCREAMING_SNAKE_CASE__ : int = value
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = value
return new_state_dict
def _a ( lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any]=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = ''
if is_panoptic:
SCREAMING_SNAKE_CASE__ : int = 'conditional_detr.'
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : str = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
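        # To make the slicing below concrete: PyTorch's nn.MultiheadAttention keeps the
        # query, key and value projections stacked row-wise in a single in_proj_weight of
        # shape (3 * hidden_size, hidden_size), i.e. (768, 256) here. Rows [0:256] become
        # the query projection, rows [256:512] the key projection, and the last 256 rows
        # the value projection; in_proj_bias is split the same way.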
SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_weight[:2_56, :]
SCREAMING_SNAKE_CASE__ : Dict = in_proj_bias[:2_56]
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_weight[2_56:5_12, :]
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_bias[2_56:5_12]
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_weight[-2_56:, :]
SCREAMING_SNAKE_CASE__ : int = in_proj_bias[-2_56:]
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
SCREAMING_SNAKE_CASE__ : Optional[int] = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def _a ( lowercase__ : Any , lowercase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
SCREAMING_SNAKE_CASE__ : Tuple = 'resnet101'
if "dc5" in model_name:
SCREAMING_SNAKE_CASE__ : Optional[int] = True
SCREAMING_SNAKE_CASE__ : List[Any] = 'panoptic' in model_name
if is_panoptic:
SCREAMING_SNAKE_CASE__ : int = 2_50
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 91
SCREAMING_SNAKE_CASE__ : int = 'huggingface/label-files'
SCREAMING_SNAKE_CASE__ : Dict = 'coco-detection-id2label.json'
SCREAMING_SNAKE_CASE__ : Any = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='dataset' ) , 'r' ) )
    SCREAMING_SNAKE_CASE__ : str = {int(k ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : List[str] = idalabel
SCREAMING_SNAKE_CASE__ : str = {v: k for k, v in idalabel.items()}
# load image processor
SCREAMING_SNAKE_CASE__ : List[str] = 'coco_panoptic' if is_panoptic else 'coco_detection'
SCREAMING_SNAKE_CASE__ : Tuple = ConditionalDetrImageProcessor(format=SCREAMING_SNAKE_CASE_ )
# prepare image
SCREAMING_SNAKE_CASE__ : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : str = encoding['pixel_values']
logger.info(f'''Converting model {model_name}...''' )
# load original model from torch hub
SCREAMING_SNAKE_CASE__ : Any = torch.hub.load('DeppMeng/ConditionalDETR' , SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ ).eval()
SCREAMING_SNAKE_CASE__ : str = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
SCREAMING_SNAKE_CASE__ : Any = 'conditional_detr.' + src
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rename_backbone_keys(SCREAMING_SNAKE_CASE_ )
# query, key and value matrices need special treatment
read_in_q_k_v(SCREAMING_SNAKE_CASE_ , is_panoptic=SCREAMING_SNAKE_CASE_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
SCREAMING_SNAKE_CASE__ : Dict = 'conditional_detr.model.' if is_panoptic else 'model.'
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('conditional_detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dict.pop(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
SCREAMING_SNAKE_CASE__ : List[Any] = state_dict.pop(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE__ : Dict = val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = state_dict.pop(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE__ : Dict = val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
SCREAMING_SNAKE_CASE__ : Optional[int] = state_dict.pop(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE__ : Dict = val
# finally, create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE__ : int = ConditionalDetrForSegmentation(SCREAMING_SNAKE_CASE_ ) if is_panoptic else ConditionalDetrForObjectDetection(SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
model.eval()
model.push_to_hub(repo_id=SCREAMING_SNAKE_CASE_ , organization='DepuMeng' , commit_message='Add model' )
# verify our conversion
SCREAMING_SNAKE_CASE__ : Union[str, Any] = conditional_detr(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE__ : int = model(SCREAMING_SNAKE_CASE_ )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1E-4 )
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
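# Example invocation of the conversion script above (the file name and the output
# directory below are placeholders, not taken from this snippet; the two flags come
# from the argument parser defined above):
#
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path /tmp/conditional_detr_resnet50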
| 715 | import requests
SCREAMING_SNAKE_CASE__ : int = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(f'''{i}.) {article['title']}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 636 | 0 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
SCREAMING_SNAKE_CASE__ : Optional[Any] = """."""
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
SCREAMING_SNAKE_CASE__ : int = [
"""Assert""",
"""AssignVariableOp""",
"""EmptyTensorList""",
"""MergeV2Checkpoints""",
"""ReadVariableOp""",
"""ResourceGather""",
"""RestoreV2""",
"""SaveV2""",
"""ShardedFilename""",
"""StatefulPartitionedCall""",
"""StaticRegexFullMatch""",
"""VarHandleOp""",
]
def _a ( lowercase__ : List[str] , lowercase__ : Any , lowercase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = SavedModel()
SCREAMING_SNAKE_CASE__ : Any = []
with open(os.path.join(_lowerCAmelCase , 'utils' , 'tf_ops' , 'onnx.json' ) ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] = json.load(_lowerCAmelCase )["opsets"]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(_lowerCAmelCase )] )
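    # The whitelist loaded above is expected to look roughly like the sketch below, a
    # hypothetical excerpt of utils/tf_ops/onnx.json: keys under "opsets" are opset
    # versions as strings and values are lists of TF op names exportable at that opset
    # (the op names shown are purely illustrative).
    #
    #   {"opsets": {"1": ["Add", "Mul", "Relu"], "2": [], "12": ["Einsum"]}}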
with open(_lowerCAmelCase , 'rb' ) as f:
saved_model.ParseFromString(f.read() )
SCREAMING_SNAKE_CASE__ : int = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sorted(_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(_lowerCAmelCase )
if strict and len(_lowerCAmelCase ) > 0:
        raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + '\n'.join(incompatible_ops ) )
elif len(_lowerCAmelCase ) > 0:
print(f'''Found the following incompatible ops for the opset {opset}:''' )
print(*_lowerCAmelCase , sep='\n' )
else:
print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 716 | import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger()
@dataclass
class snake_case :
lowercase_ = 42
lowercase_ = field(default_factory=UpperCamelCase_ )
lowercase_ = field(default_factory=UpperCamelCase_ )
def __lowercase( self : Dict , a_ : Dict , a_ : Tensor , a_ : Tensor )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = len(list(m.modules() ) ) == 1 or isinstance(a_ , nn.Convad ) or isinstance(a_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(a_ )
def __call__( self : Tuple , a_ : Tensor )-> Any:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(a_ )
[x.remove() for x in self.handles]
return self
@property
def __lowercase( self : Tuple )-> int:
"""simple docstring"""
# check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class snake_case :
lowercase_ = 42
lowercase_ = 42
lowercase_ = 1
lowercase_ = field(default_factory=UpperCamelCase_ )
lowercase_ = field(default_factory=UpperCamelCase_ )
lowercase_ = True
def __call__( self : List[Any] , a_ : Tensor )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = Tracker(self.dest )(a_ ).parametrized
SCREAMING_SNAKE_CASE__ : Optional[int] = Tracker(self.src )(a_ ).parametrized
SCREAMING_SNAKE_CASE__ : List[str] = list(filter(lambda a_ : type(a_ ) not in self.src_skip , a_ ) )
SCREAMING_SNAKE_CASE__ : Dict = list(filter(lambda a_ : type(a_ ) not in self.dest_skip , a_ ) )
if len(a_ ) != len(a_ ) and self.raise_if_mismatch:
raise Exception(
F'''Numbers of operations are different. Source module has {len(a_ )} operations while'''
F''' destination module has {len(a_ )}.''' )
for dest_m, src_m in zip(a_ , a_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'''Transfered from={src_m} to={dest_m}''' )
class snake_case ( nn.Module ):
def __init__( self : List[Any] , a_ : nn.Module )-> Dict:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(('conv1', model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('block' ), F'''Unexpected layer name {k}'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(a_ ) + 1
feature_blocks.append((F'''res{block_index}''', v) )
SCREAMING_SNAKE_CASE__ : Any = nn.ModuleDict(a_ )
def __lowercase( self : Tuple , a_ : Tensor )-> Dict:
"""simple docstring"""
return get_trunk_forward_outputs(
a_ , out_feat_keys=a_ , feature_blocks=self._feature_blocks , )
class snake_case ( UpperCamelCase_ ):
def __lowercase( self : Optional[Any] , a_ : str )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = x.split('-' )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self : Union[str, Any] , a_ : str )-> Callable[[], Tuple[nn.Module, Dict]]:
"""simple docstring"""
# default to timm!
if x not in self:
SCREAMING_SNAKE_CASE__ : Any = self.convert_name_to_timm(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = partial(lambda: (timm.create_model(a_ , pretrained=a_ ).eval(), None) )
else:
SCREAMING_SNAKE_CASE__ : List[str] = super().__getitem__(a_ )
return val
class snake_case ( UpperCamelCase_ ):
def __getitem__( self : Any , a_ : str )-> Callable[[], nn.Module]:
"""simple docstring"""
if "seer" in x and "in1k" not in x:
SCREAMING_SNAKE_CASE__ : Any = RegNetModel
else:
SCREAMING_SNAKE_CASE__ : Any = RegNetForImageClassification
return val
def _a ( lowercase__ : Any , lowercase__ : Optional[Any] , lowercase__ : List[Tuple[str, str]] ):
'''simple docstring'''
for from_key, to_key in keys:
SCREAMING_SNAKE_CASE__ : Tuple = from_state_dict[from_key].clone()
print(f'''Copied key={from_key} to={to_key}''' )
return to_state_dict
def _a ( lowercase__ : str , lowercase__ : Callable[[], nn.Module] , lowercase__ : Callable[[], nn.Module] , lowercase__ : RegNetConfig , lowercase__ : Path , lowercase__ : bool = True , ):
'''simple docstring'''
print(f'''Converting {name}...''' )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = from_model_func()
SCREAMING_SNAKE_CASE__ : int = our_model_func(lowercase__ ).eval()
SCREAMING_SNAKE_CASE__ : List[Any] = ModuleTransfer(src=lowercase__ , dest=lowercase__ , raise_if_mismatch=lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.randn((1, 3, 2_24, 2_24) )
module_transfer(lowercase__ )
if from_state_dict is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
SCREAMING_SNAKE_CASE__ : int = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
SCREAMING_SNAKE_CASE__ : Optional[Any] = manually_copy_vissl_head(lowercase__ , our_model.state_dict() , lowercase__ )
our_model.load_state_dict(lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = our_model(lowercase__ , output_hidden_states=lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = (
our_outputs.logits if isinstance(lowercase__ , lowercase__ ) else our_outputs.last_hidden_state
)
SCREAMING_SNAKE_CASE__ : List[Any] = from_model(lowercase__ )
SCREAMING_SNAKE_CASE__ : List[str] = from_output[-1] if type(lowercase__ ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
SCREAMING_SNAKE_CASE__ : List[Any] = our_outputs.hidden_states[-1]
assert torch.allclose(lowercase__ , lowercase__ ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add model' , use_temp_dir=lowercase__ , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 2_24 if 'seer' not in name else 3_84
# we can use the convnext one
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' , size=lowercase__ )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add image processor' , use_temp_dir=lowercase__ , )
print(f'''Pushed {name}''' )
def _a ( lowercase__ : Path , lowercase__ : str = None , lowercase__ : bool = True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = 'imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE__ : Tuple = 10_00
SCREAMING_SNAKE_CASE__ : Tuple = (1, num_labels)
SCREAMING_SNAKE_CASE__ : str = 'huggingface/label-files'
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE__ : List[str] = json.load(open(cached_download(hf_hub_url(lowercase__ , lowercase__ , repo_type='dataset' ) ) , 'r' ) )
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = {int(k ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : str = idalabel
SCREAMING_SNAKE_CASE__ : Tuple = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Any = partial(lowercase__ , num_labels=lowercase__ , idalabel=lowercase__ , labelaid=lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 , layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 1_60, 3_84] , groups_width=16 , layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 2_40, 5_28] , groups_width=24 , layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 1_28, 2_88, 6_72] , groups_width=16 , layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 1_68, 4_08, 9_12] , groups_width=24 , layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 1_92, 4_32, 10_08] , groups_width=48 , layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 2_40, 5_60, 13_60] , groups_width=40 , layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 3_92, 7_84, 16_24] , groups_width=56 , layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 2_40, 7_20, 19_20] , groups_width=1_20 , layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 , layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[2_56, 5_12, 8_96, 20_48] , groups_width=1_28 , layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[3_36, 6_72, 13_44, 25_20] , groups_width=1_68 , layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 1_04, 2_08, 4_40] , groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 1_12, 2_56, 6_08] , groups_width=16 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 1_28, 3_20, 7_68] , groups_width=16 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 1_20, 3_36, 8_88] , groups_width=24 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 2_16, 5_76, 15_12] , groups_width=24 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[1_28, 1_92, 5_12, 10_88] , groups_width=64 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[1_44, 2_88, 5_76, 12_96] , groups_width=72 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[1_68, 4_48, 8_96, 20_16] , groups_width=56 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[2_24, 4_48, 8_96, 22_40] , groups_width=1_12 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[2_24, 4_48, 12_32, 30_24] , groups_width=1_12 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[2_32, 6_96, 13_92, 37_12] , groups_width=2_32 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[3_28, 9_84, 19_68, 49_20] , groups_width=3_28 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[5_28, 10_56, 29_04, 73_92] , groups_width=2_64 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[6_40, 16_96, 25_44, 50_88] , groups_width=6_40 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80] , groups_width=10_10 ),
}
SCREAMING_SNAKE_CASE__ : List[Any] = NameToOurModelFuncMap()
SCREAMING_SNAKE_CASE__ : Dict = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(lowercase__ : str , lowercase__ : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
SCREAMING_SNAKE_CASE__ : str = torch.hub.load_state_dict_from_url(lowercase__ , model_dir=str(lowercase__ ) , map_location='cpu' )
SCREAMING_SNAKE_CASE__ : Tuple = model_func()
# check if we have a head, if yes add it
SCREAMING_SNAKE_CASE__ : str = files['classy_state_dict']['base_model']['model']
SCREAMING_SNAKE_CASE__ : str = model_state_dict['trunk']
model.load_state_dict(lowercase__ )
return model.eval(), model_state_dict["heads"]
# pretrained
SCREAMING_SNAKE_CASE__ : Any = partial(
        lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch' , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
SCREAMING_SNAKE_CASE__ : int = partial(
        lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
SCREAMING_SNAKE_CASE__ : List[Any] = partial(
        lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
SCREAMING_SNAKE_CASE__ : Optional[int] = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch' , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=10_10 , w_0=17_44 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
SCREAMING_SNAKE_CASE__ : List[Any] = partial(
        lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = partial(
        lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
SCREAMING_SNAKE_CASE__ : Optional[int] = partial(
        lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
SCREAMING_SNAKE_CASE__ : Any = partial(
lowercase__ , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch' , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=10_10 , w_0=17_44 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
lowercase__ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , lowercase__ , lowercase__ , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
lowercase__ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , lowercase__ , lowercase__ , lowercase__ , )
return config, expected_shape
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 636 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class snake_case :
def __init__( self : Optional[int] , a_ : List[Any] , a_ : Union[str, Any] )-> None:
"""simple docstring"""
if len(a_ ) != degree + 1:
raise ValueError(
'The number of coefficients should be equal to the degree + 1.' )
SCREAMING_SNAKE_CASE__ : str = list(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = degree
def __add__( self : Dict , a_ : List[Any] )-> Polynomial:
"""simple docstring"""
if self.degree > polynomial_a.degree:
SCREAMING_SNAKE_CASE__ : int = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , a_ )
else:
SCREAMING_SNAKE_CASE__ : Tuple = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , a_ )
def __sub__( self : Any , a_ : int )-> Polynomial:
"""simple docstring"""
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self : Union[str, Any] )-> Polynomial:
"""simple docstring"""
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self : str , a_ : str )-> Polynomial:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , a_ )
def __lowercase( self : str , a_ : Tuple )-> int | float:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : List[str] )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = ''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
                polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(i )
return polynomial
def __repr__( self : List[str] )-> str:
"""simple docstring"""
return self.__str__()
def __lowercase( self : Union[str, Any] )-> Polynomial:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = [0] * self.degree
for i in range(self.degree ):
SCREAMING_SNAKE_CASE__ : List[Any] = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , a_ )
def __lowercase( self : List[str] , a_ : Optional[int] = 0 )-> Polynomial:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = [0] * (self.degree + 2)
SCREAMING_SNAKE_CASE__ : Tuple = constant
for i in range(self.degree + 1 ):
SCREAMING_SNAKE_CASE__ : List[Any] = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , a_ )
def __eq__( self : List[Any] , a_ : Any )-> bool:
"""simple docstring"""
if not isinstance(a_ , a_ ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : Any , a_ : List[Any] )-> bool:
"""simple docstring"""
return not self.__eq__(a_ )
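# A minimal usage sketch for the polynomial class above. It relies only on the dunder
# methods defined there (__add__, __mul__, __eq__, __str__) and refers to the class as
# `Polynomial`, the name its own methods use when constructing results (the displayed
# class name is a placeholder). Coefficients are ordered from the constant term upward,
# so [1, 0, 3] encodes 3x^2 + 1.
if __name__ == "__main__":
    p = Polynomial(2, [1, 0, 3])  # 3x^2 + 1
    q = Polynomial(1, [0, 2])  # 2x
    print(p + q)  # 3x^2 + 2x + 1
    print(p * q)  # 6x^3 + 2x
    print(p == q)  # False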
| 717 | import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class snake_case ( UpperCamelCase_ ):
lowercase_ = ['image_processor', 'tokenizer']
lowercase_ = 'OwlViTImageProcessor'
lowercase_ = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : List[str] , a_ : List[Any]=None , a_ : str=None , **a_ : Any )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , a_ , )
SCREAMING_SNAKE_CASE__ : Tuple = kwargs.pop('feature_extractor' )
SCREAMING_SNAKE_CASE__ : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(a_ , a_ )
def __call__( self : Any , a_ : Optional[int]=None , a_ : Tuple=None , a_ : List[Any]=None , a_ : Tuple="max_length" , a_ : str="np" , **a_ : Any )-> int:
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(a_ , a_ ) or (isinstance(a_ , a_ ) and not isinstance(text[0] , a_ )):
SCREAMING_SNAKE_CASE__ : Tuple = [self.tokenizer(a_ , padding=a_ , return_tensors=a_ , **a_ )]
elif isinstance(a_ , a_ ) and isinstance(text[0] , a_ ):
SCREAMING_SNAKE_CASE__ : Any = []
# Maximum number of queries across batch
                SCREAMING_SNAKE_CASE__ : str = max([len(t ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        SCREAMING_SNAKE_CASE__ : Tuple = t + [' '] * (max_num_queries - len(t ))
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer(a_ , padding=a_ , return_tensors=a_ , **a_ )
encodings.append(a_ )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
SCREAMING_SNAKE_CASE__ : Dict = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ : List[Any] = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ : int = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
SCREAMING_SNAKE_CASE__ : str = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ : Dict = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
SCREAMING_SNAKE_CASE__ : Optional[int] = BatchEncoding()
SCREAMING_SNAKE_CASE__ : List[str] = input_ids
SCREAMING_SNAKE_CASE__ : Tuple = attention_mask
if query_images is not None:
SCREAMING_SNAKE_CASE__ : Any = BatchEncoding()
SCREAMING_SNAKE_CASE__ : Dict = self.image_processor(
a_ , return_tensors=a_ , **a_ ).pixel_values
SCREAMING_SNAKE_CASE__ : Dict = query_pixel_values
if images is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor(a_ , return_tensors=a_ , **a_ )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ : Dict = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def __lowercase( self : str , *a_ : List[str] , **a_ : int )-> List[Any]:
"""simple docstring"""
return self.image_processor.post_process(*a_ , **a_ )
def __lowercase( self : Tuple , *a_ : List[str] , **a_ : str )-> Union[str, Any]:
"""simple docstring"""
return self.image_processor.post_process_object_detection(*a_ , **a_ )
def __lowercase( self : Optional[Any] , *a_ : str , **a_ : Dict )-> Optional[int]:
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*a_ , **a_ )
def __lowercase( self : Optional[int] , *a_ : Tuple , **a_ : Tuple )-> Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*a_ , **a_ )
def __lowercase( self : Tuple , *a_ : Tuple , **a_ : Tuple )-> List[str]:
"""simple docstring"""
return self.tokenizer.decode(*a_ , **a_ )
@property
def __lowercase( self : Tuple )-> Any:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , a_ , )
return self.image_processor_class
@property
def __lowercase( self : List[Any] )-> List[str]:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , a_ , )
return self.image_processor
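# A minimal usage sketch for the processor above. In the transformers library this
# processor is exposed as OwlViTProcessor (the class shown here is its implementation
# with placeholder names); the checkpoint name below is the public OWL-ViT base model.
import requests
from PIL import Image

from transformers import OwlViTProcessor

if __name__ == "__main__":
    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    # Text + image branch of __call__ above: the result carries input_ids,
    # attention_mask and pixel_values.
    inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
    print(sorted(inputs.keys()))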
| 636 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class snake_case ( UpperCAmelCase__ , unittest.TestCase ):
lowercase_ = ShapEImgaImgPipeline
lowercase_ = ["image"]
lowercase_ = ["image"]
lowercase_ = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
lowercase_ = False
@property
def __lowercase( self : Any )-> List[str]:
"""simple docstring"""
return 32
@property
def __lowercase( self : str )-> str:
"""simple docstring"""
return 32
@property
def __lowercase( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __lowercase( self : Tuple )-> Optional[int]:
"""simple docstring"""
return 8
@property
def __lowercase( self : Tuple )-> Any:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
SCREAMING_SNAKE_CASE__ : List[str] = CLIPVisionModel(lowerCamelCase__ )
return model
@property
def __lowercase( self : Optional[Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = CLIPImageProcessor(
crop_size=224 , do_center_crop=lowerCamelCase__ , do_normalize=lowerCamelCase__ , do_resize=lowerCamelCase__ , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , )
return image_processor
@property
def __lowercase( self : Optional[Any] )-> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"embedding_proj_norm_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
SCREAMING_SNAKE_CASE__ : Tuple = PriorTransformer(**lowerCamelCase__ )
return model
@property
def __lowercase( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
SCREAMING_SNAKE_CASE__ : Dict = ShapERenderer(**lowerCamelCase__ )
return model
def __lowercase( self : int )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.dummy_prior
SCREAMING_SNAKE_CASE__ : str = self.dummy_image_encoder
SCREAMING_SNAKE_CASE__ : Any = self.dummy_image_processor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.dummy_renderer
SCREAMING_SNAKE_CASE__ : str = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=lowerCamelCase__ , clip_sample=lowerCamelCase__ , clip_sample_range=1.0 , )
SCREAMING_SNAKE_CASE__ : List[Any] = {
"prior": prior,
"image_encoder": image_encoder,
"image_processor": image_processor,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def __lowercase( self : List[str] , a_ : Optional[Any] , a_ : Optional[int]=0 )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
if str(lowerCamelCase__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE__ : Dict = torch.manual_seed(lowerCamelCase__ )
else:
SCREAMING_SNAKE_CASE__ : str = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"image": input_image,
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def __lowercase( self : List[str] )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = "cpu"
SCREAMING_SNAKE_CASE__ : int = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : List[str] = self.pipeline_class(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : Dict = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = output.images[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : Dict = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowercase( self : Optional[Any] )-> str:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowercase( self : Optional[int] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = torch_device == "cpu"
SCREAMING_SNAKE_CASE__ : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowerCamelCase__ , relax_max_difference=lowerCamelCase__ , )
def __lowercase( self : int )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : List[str] = self.pipeline_class(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple = 1
SCREAMING_SNAKE_CASE__ : Tuple = 2
SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_inputs(lowerCamelCase__ )
for key in inputs.keys():
if key in self.batch_params:
SCREAMING_SNAKE_CASE__ : str = batch_size * [inputs[key]]
SCREAMING_SNAKE_CASE__ : List[str] = pipe(**lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def __lowercase( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase( self : Optional[int] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
SCREAMING_SNAKE_CASE__ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
SCREAMING_SNAKE_CASE__ : str = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
SCREAMING_SNAKE_CASE__ : str = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple = pipe(
lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
| 718 | class snake_case ( UpperCamelCase_ ):
pass
class snake_case ( UpperCamelCase_ ):
pass
class snake_case :
def __init__( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = [
[],
[],
[],
]
def __lowercase( self : int , a_ : int , a_ : int )-> None:
"""simple docstring"""
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError('Maximum queue size is 100' )
self.queues[priority].append(a_ )
except IndexError:
raise ValueError('Valid priorities are 0, 1, and 2' )
def __lowercase( self : int )-> int:
"""simple docstring"""
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self : Any )-> str:
"""simple docstring"""
return "\n".join(F'''Priority {i}: {q}''' for i, q in enumerate(self.queues ) )
class snake_case :
def __init__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = []
def __lowercase( self : List[str] , a_ : int )-> None:
"""simple docstring"""
if len(self.queue ) == 100:
raise OverFlowError('Maximum queue size is 100' )
self.queue.append(a_ )
def __lowercase( self : int )-> int:
"""simple docstring"""
if not self.queue:
raise UnderFlowError('The queue is empty' )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = min(self.queue )
self.queue.remove(a_ )
return data
def __str__( self : List[str] )-> str:
"""simple docstring"""
return str(self.queue )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
print(lowercase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(lowercase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
print(lowercase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(lowercase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 636 | 0 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class snake_case ( __a , __a ):
@register_to_config
def __init__( self : List[str] , a_ : Any = 128 , a_ : Optional[int] = 256 , a_ : Optional[Any] = 2000.0 , a_ : int = 768 , a_ : Optional[Any] = 12 , a_ : Union[str, Any] = 12 , a_ : int = 64 , a_ : Optional[Any] = 2048 , a_ : Optional[int] = 0.1 , )-> Any:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.Sequential(
nn.Linear(lowerCAmelCase_ , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , )
SCREAMING_SNAKE_CASE__ : int = nn.Embedding(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Dict = False
SCREAMING_SNAKE_CASE__ : List[Any] = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Any = nn.Dropout(p=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : List[str] = nn.ModuleList()
for lyr_num in range(lowerCAmelCase_ ):
# FiLM conditional T5 decoder
SCREAMING_SNAKE_CASE__ : int = DecoderLayer(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ )
self.decoders.append(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : List[str] = TaLayerNorm(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : List[Any] = nn.Dropout(p=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : str = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
def __lowercase( self : List[Any] , a_ : List[str] , a_ : Tuple )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __lowercase( self : List[Any] , a_ : Optional[int] , a_ : Tuple , a_ : Dict )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
SCREAMING_SNAKE_CASE__ : List[str] = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
SCREAMING_SNAKE_CASE__ : List[str] = self.conditioning_emb(lowerCAmelCase_ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.broadcast_to(
torch.arange(lowerCAmelCase_ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
SCREAMING_SNAKE_CASE__ : int = self.position_encoding(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.continuous_inputs_projection(lowerCAmelCase_ )
inputs += position_encodings
SCREAMING_SNAKE_CASE__ : int = self.dropout(lowerCAmelCase_ )
# decoder: No padding present.
SCREAMING_SNAKE_CASE__ : Tuple = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
SCREAMING_SNAKE_CASE__ : Any = [(x, self.encoder_decoder_mask(lowerCAmelCase_ , lowerCAmelCase_ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
SCREAMING_SNAKE_CASE__ : Any = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
SCREAMING_SNAKE_CASE__ : Dict = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
SCREAMING_SNAKE_CASE__ : Tuple = lyr(
lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )[0]
SCREAMING_SNAKE_CASE__ : Tuple = self.decoder_norm(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : int = self.post_dropout(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : int = self.spec_out(lowerCAmelCase_ )
return spec_out
class snake_case ( nn.Module ):
def __init__( self : List[Any] , a_ : List[Any] , a_ : Optional[int] , a_ : Optional[Any] , a_ : Optional[int] , a_ : List[str] , a_ : Tuple=1e-6 )-> Any:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : Optional[int] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ ) )
def __lowercase( self : Union[str, Any] , a_ : Dict , a_ : Tuple=None , a_ : Any=None , a_ : Tuple=None , a_ : Dict=None , a_ : List[Any]=None , )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.layer[0](
lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , )
if encoder_hidden_states is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.where(encoder_attention_mask > 0 , 0 , -1e1_0 ).to(
encoder_hidden_states.dtype )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.layer[1](
lowerCAmelCase_ , key_value_states=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , )
# Apply Film Conditional Feed Forward layer
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.layer[-1](lowerCAmelCase_ , lowerCAmelCase_ )
return (hidden_states,)
class snake_case ( nn.Module ):
def __init__( self : Dict , a_ : List[str] , a_ : List[str] , a_ : Union[str, Any] , a_ : List[Any] )-> List[Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : List[str] = TaLayerNorm(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : str = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.Dropout(lowerCAmelCase_ )
def __lowercase( self : str , a_ : List[str] , a_ : str=None , a_ : Dict=None , )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.layer_norm(lowerCAmelCase_ )
if conditioning_emb is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.FiLMLayer(lowerCAmelCase_ , lowerCAmelCase_ )
# Self-attention block
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.attention(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : List[str] = hidden_states + self.dropout(lowerCAmelCase_ )
return hidden_states
class snake_case ( nn.Module ):
def __init__( self : Dict , a_ : Optional[int] , a_ : List[str] , a_ : List[str] , a_ : Union[str, Any] , a_ : Union[str, Any] )-> Any:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : Optional[int] = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Dict = nn.Dropout(lowerCAmelCase_ )
def __lowercase( self : Dict , a_ : int , a_ : int=None , a_ : Tuple=None , )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.layer_norm(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : List[Any] = self.attention(
lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , attention_mask=attention_mask.squeeze(1 ) , )
SCREAMING_SNAKE_CASE__ : List[str] = hidden_states + self.dropout(lowerCAmelCase_ )
return layer_output
class snake_case ( nn.Module ):
def __init__( self : List[str] , a_ : List[str] , a_ : Any , a_ : str , a_ : str )-> Optional[int]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : Optional[int] = TaDenseGatedActDense(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Dict = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : int = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Tuple = nn.Dropout(lowerCAmelCase_ )
def __lowercase( self : Any , a_ : str , a_ : Tuple=None )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.layer_norm(lowerCAmelCase_ )
if conditioning_emb is not None:
SCREAMING_SNAKE_CASE__ : int = self.film(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.DenseReluDense(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Any = hidden_states + self.dropout(lowerCAmelCase_ )
return hidden_states
class snake_case ( nn.Module ):
def __init__( self : Any , a_ : Union[str, Any] , a_ : Optional[Any] , a_ : Optional[int] )-> Optional[Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : Tuple = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : List[Any] = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = nn.Dropout(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = NewGELUActivation()
def __lowercase( self : int , a_ : Dict )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.act(self.wi_0(lowerCAmelCase_ ) ) # gated branch: GELU-activated projection
SCREAMING_SNAKE_CASE__ : Any = self.wi_1(lowerCAmelCase_ ) # linear branch: a second, independent projection (gating needs two distinct layers)
SCREAMING_SNAKE_CASE__ : int = hidden_gelu * hidden_linear
SCREAMING_SNAKE_CASE__ : List[str] = self.dropout(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.wo(lowerCAmelCase_ )
return hidden_states
class snake_case ( nn.Module ):
def __init__( self : Any , a_ : Any , a_ : Union[str, Any]=1e-6 )-> List[str]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : Optional[Any] = nn.Parameter(torch.ones(lowerCAmelCase_ ) )
SCREAMING_SNAKE_CASE__ : int = eps
def __lowercase( self : Optional[int] , a_ : Dict )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# T5-style RMS norm: the variance is always accumulated in float32, then the result is cast back to half precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
SCREAMING_SNAKE_CASE__ : Any = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class snake_case ( nn.Module ):
def __lowercase( self : Tuple , a_ : str )-> torch.Tensor:
"""simple docstring"""
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044715 * torch.pow(lowerCAmelCase_ , 3.0 )) ))
class snake_case ( nn.Module ):
def __init__( self : Dict , a_ : str , a_ : Optional[Any] )-> Tuple:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : str = nn.Linear(lowerCAmelCase_ , out_features * 2 , bias=lowerCAmelCase_ )
def __lowercase( self : Optional[Any] , a_ : Optional[Any] , a_ : List[str] )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.scale_bias(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = torch.chunk(lowerCAmelCase_ , 2 , -1 )
SCREAMING_SNAKE_CASE__ : Tuple = x * (1 + scale) + shift
return x
| 719 | from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _a ( lowercase__ : List[str] ):
'''simple docstring'''
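# Decorator for model methods: when accelerate >= 0.17.0 is installed and the module carries an offload hook, run _hf_hook.pre_forward before the wrapped call.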
if not is_accelerate_available():
return method
SCREAMING_SNAKE_CASE__ : str = version.parse(accelerate.__version__ ).base_version
if version.parse(lowercase__ ) < version.parse('0.17.0' ):
return method
def wrapper(self : Optional[int] , *lowercase__ : int , **lowercase__ : Tuple ):
if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
self._hf_hook.pre_forward(self )
return method(self , *lowercase__ , **lowercase__ )
return wrapper
| 636 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE__ : int = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : str = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[str] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 720 | import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _a ( lowercase__ : int ):
'''simple docstring'''
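# True when the model was wrapped by torch.compile (torch._dynamo.eval_frame.OptimizedModule); older torch builds without _dynamo always return False.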
if is_torch_version('<' , '2.0.0' ) or not hasattr(lowercase__ , '_dynamo' ):
return False
return isinstance(lowercase__ , torch._dynamo.eval_frame.OptimizedModule )
def _a ( lowercase__ : Optional[Any] , lowercase__ : bool = True ):
'''simple docstring'''
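# Unwrap DistributedDataParallel / DataParallel (and DeepSpeedEngine when available), optionally dropping the mixed-precision forward wrapper, while preserving a torch.compile wrapper.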
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
SCREAMING_SNAKE_CASE__ : Dict = is_compiled_module(lowercase__ )
if is_compiled:
SCREAMING_SNAKE_CASE__ : Tuple = model
SCREAMING_SNAKE_CASE__ : int = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE__ : Any = model.module
if not keep_fpaa_wrapper:
SCREAMING_SNAKE_CASE__ : List[Any] = getattr(lowercase__ , 'forward' )
SCREAMING_SNAKE_CASE__ : str = model.__dict__.pop('_original_forward' , lowercase__ )
if original_forward is not None:
while hasattr(lowercase__ , '__wrapped__' ):
SCREAMING_SNAKE_CASE__ : Dict = forward.__wrapped__
if forward == original_forward:
break
SCREAMING_SNAKE_CASE__ : Dict = forward
if getattr(lowercase__ , '_converted_to_transformer_engine' , lowercase__ ):
convert_model(lowercase__ , to_transformer_engine=lowercase__ )
if is_compiled:
SCREAMING_SNAKE_CASE__ : List[Any] = model
SCREAMING_SNAKE_CASE__ : Optional[Any] = compiled_model
return model
def _a ( ):
'''simple docstring'''
PartialState().wait_for_everyone()
def _a ( lowercase__ : str , lowercase__ : Optional[Any] ):
'''simple docstring'''
if PartialState().distributed_type == DistributedType.TPU:
xm.save(lowercase__ , lowercase__ )
elif PartialState().local_process_index == 0:
torch.save(lowercase__ , lowercase__ )
@contextmanager
def _a ( **lowercase__ : str ):
'''simple docstring'''
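# Context manager: temporarily export the given keyword arguments as upper-cased environment variables, then remove them again on exit.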
for key, value in kwargs.items():
SCREAMING_SNAKE_CASE__ : int = str(lowercase__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _a ( lowercase__ : Optional[Any] ):
'''simple docstring'''
if not hasattr(lowercase__ , '__qualname__' ) and not hasattr(lowercase__ , '__name__' ):
SCREAMING_SNAKE_CASE__ : Any = getattr(lowercase__ , '__class__' , lowercase__ )
if hasattr(lowercase__ , '__qualname__' ):
return obj.__qualname__
if hasattr(lowercase__ , '__name__' ):
return obj.__name__
return str(lowercase__ )
def _a ( lowercase__ : List[str] , lowercase__ : List[Any] ):
'''simple docstring'''
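# Recursively merge source into destination: nested dicts are merged key by key, scalar values are overwritten.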
for key, value in source.items():
if isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE__ : List[str] = destination.setdefault(lowercase__ , {} )
merge_dicts(lowercase__ , lowercase__ )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = value
return destination
def _a ( lowercase__ : int = None ):
'''simple docstring'''
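# Returns True when something is already listening on localhost:port (defaults to 29500, the standard launcher port).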
if port is None:
SCREAMING_SNAKE_CASE__ : int = 2_95_00
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
| 636 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class snake_case :
lowercase_ = 42
lowercase_ = None
lowercase_ = None
def _a ( ):
'''simple docstring'''
# Build and link the example tree: 1 at the root, 2 and 3 as its children, 4 and 5 as the children of 2.
tree = Node(1 )
tree.left = Node(2 )
tree.right = Node(3 )
tree.left.left = Node(4 )
tree.left.right = Node(5 )
return tree
def _a ( lowercase__ : Node | None ):
'''simple docstring'''
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def _a ( lowercase__ : Node | None ):
'''simple docstring'''
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def _a ( lowercase__ : Node | None ):
'''simple docstring'''
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def _a ( lowercase__ : Node | None ):
'''simple docstring'''
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def _a ( lowercase__ : Node | None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
if root is None:
return output
SCREAMING_SNAKE_CASE__ : Optional[int] = deque([root] )
while process_queue:
SCREAMING_SNAKE_CASE__ : List[str] = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def _a ( lowercase__ : Node | None , lowercase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = []
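# Recursively descend while decrementing the level counter; node values are emitted once the counter reaches 1.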
def populate_output(lowercase__ : Node | None , lowercase__ : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(_UpperCamelCase , _UpperCamelCase )
return output
def _a ( lowercase__ : Node | None , lowercase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = []
def populate_output(lowercase__ : Node | None , lowercase__ : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(_UpperCamelCase , _UpperCamelCase )
return output
def _a ( lowercase__ : Node | None ):
'''simple docstring'''
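# Zig-zag (spiral) level order: alternate the scan direction on every level of the tree.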
if root is None:
return []
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : List[str] = height(_UpperCamelCase )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(_UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE__ : str = 1
else:
output.append(get_nodes_from_right_to_left(_UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE__ : Dict = 0
return output
def _a ( ): # Main function for testing.
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_tree()
print(f'''In-order Traversal: {inorder(_UpperCamelCase )}''' )
print(f'''Pre-order Traversal: {preorder(_UpperCamelCase )}''' )
print(f'''Post-order Traversal: {postorder(_UpperCamelCase )}''' , '\n' )
print(f'''Height of Tree: {height(_UpperCamelCase )}''' , '\n' )
print('Complete Level Order Traversal: ' )
print(level_order(_UpperCamelCase ) , '\n' )
print('Level-wise order Traversal: ' )
for level in range(1 , height(_UpperCamelCase ) + 1 ):
print(f'''Level {level}:''' , get_nodes_from_left_to_right(_UpperCamelCase , level=_UpperCamelCase ) )
print('\nZigZag order Traversal: ' )
print(zigzag(_UpperCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 721 | from __future__ import annotations
def _a ( lowercase__ : list[int | float] , lowercase__ : int , lowercase__ : int ):
'''simple docstring'''
if len(lowercase__ ) == 0:
raise ValueError('find_max() arg is an empty sequence' )
if (
left >= len(lowercase__ )
or left < -len(lowercase__ )
or right >= len(lowercase__ )
or right < -len(lowercase__ )
):
raise IndexError('list index out of range' )
if left == right:
return nums[left]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (left + right) >> 1 # the middle
SCREAMING_SNAKE_CASE__ : int = find_max(lowercase__ , lowercase__ , lowercase__ ) # find max in range[left, mid]
SCREAMING_SNAKE_CASE__ : Tuple = find_max(lowercase__ , mid + 1 , lowercase__ ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 636 | 0 |
def _a ( lowercase__ : int ):
'''simple docstring'''
if upper_limit < 0:
raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
SCREAMING_SNAKE_CASE__ : List[str] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
SCREAMING_SNAKE_CASE__ : Any = 1
if upper_limit > 0:
SCREAMING_SNAKE_CASE__ : List[str] = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(lowercase__ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
print("\n*** Enter -1 at any time to quit ***")
print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
try:
while True:
SCREAMING_SNAKE_CASE__ : int = int(input().strip())
if N < 0:
print("\n********* Goodbye!! ************")
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("Try another upper limit for the sequence: ", end="")
except (NameError, ValueError):
print("\n********* Invalid input, goodbye! ************\n")
import doctest
doctest.testmod()
| 700 | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def _a ( lowercase__ : Any ):
'''simple docstring'''
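# Each process builds a distinct slice [rank*n + 1, ..., rank*n + n], so the collective ops below have a known per-process result.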
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def _a ( lowercase__ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = create_tensor(lowercase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = gather(lowercase__ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def _a ( lowercase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = [state.process_index]
SCREAMING_SNAKE_CASE__ : Any = gather_object(lowercase__ )
assert len(lowercase__ ) == state.num_processes, f'''{gathered_obj}, {len(lowercase__ )} != {state.num_processes}'''
assert gathered_obj == list(range(state.num_processes ) ), f'''{gathered_obj} != {list(range(state.num_processes ) )}'''
def _a ( lowercase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = create_tensor(lowercase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = broadcast(lowercase__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def _a ( lowercase__ : int ):
'''simple docstring'''
if state.is_main_process:
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.arange(state.num_processes + 1 ).to(state.device )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = torch.arange(state.num_processes ).to(state.device )
SCREAMING_SNAKE_CASE__ : Any = pad_across_processes(lowercase__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def _a ( lowercase__ : Optional[Any] ):
'''simple docstring'''
if state.num_processes != 2:
return
SCREAMING_SNAKE_CASE__ : List[Any] = create_tensor(lowercase__ )
SCREAMING_SNAKE_CASE__ : str = reduce(lowercase__ , 'sum' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), f'''{reduced_tensor} != {truth_tensor}'''
def _a ( lowercase__ : int ):
'''simple docstring'''
if state.num_processes != 2:
return
SCREAMING_SNAKE_CASE__ : Any = create_tensor(lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = reduce(lowercase__ , 'mean' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), f'''{reduced_tensor} != {truth_tensor}'''
def _a ( lowercase__ : int ):
'''simple docstring'''
main()
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = PartialState()
state.print(f'''State: {state}''' )
state.print('testing gather' )
test_gather(lowercase__ )
state.print('testing gather_object' )
test_gather_object(lowercase__ )
state.print('testing broadcast' )
test_broadcast(lowercase__ )
state.print('testing pad_across_processes' )
test_pad_across_processes(lowercase__ )
state.print('testing reduce_sum' )
test_reduce_sum(lowercase__ )
state.print('testing reduce_mean' )
test_reduce_mean(lowercase__ )
if __name__ == "__main__":
main()
| 636 | 0 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
SCREAMING_SNAKE_CASE__ : int = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 701 | import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
SCREAMING_SNAKE_CASE__ : Any = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class snake_case ( unittest.TestCase ):
def __init__( self : List[Any] , a_ : Optional[int] , a_ : Dict=7 , a_ : Any=3 , a_ : Any=18 , a_ : int=30 , a_ : int=400 , a_ : List[Any]=None , a_ : int=True , a_ : int=True , a_ : Dict=None , )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else {'height': 20, 'width': 20}
SCREAMING_SNAKE_CASE__ : str = parent
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : Any = num_channels
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_size
SCREAMING_SNAKE_CASE__ : List[str] = min_resolution
SCREAMING_SNAKE_CASE__ : Dict = max_resolution
SCREAMING_SNAKE_CASE__ : List[Any] = size
SCREAMING_SNAKE_CASE__ : Tuple = do_normalize
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_convert_rgb
SCREAMING_SNAKE_CASE__ : List[str] = [512, 1024, 2048, 4096]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = patch_size if patch_size is not None else {'height': 16, 'width': 16}
def __lowercase( self : Optional[Any] )-> str:
"""simple docstring"""
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __lowercase( self : Dict )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(a_ , stream=a_ ).raw ).convert('RGB' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = PixaStructImageProcessor if is_vision_available() else None
def __lowercase( self : List[str] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = PixaStructImageProcessingTester(self )
@property
def __lowercase( self : Dict )-> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , 'do_normalize' ) )
self.assertTrue(hasattr(a_ , 'do_convert_rgb' ) )
def __lowercase( self : List[Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.image_processor_tester.prepare_dummy_image()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE__ : List[Any] = 2048
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(a_ , return_tensors='pt' , max_patches=a_ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : str = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processor(
a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowercase( self : Any )-> Any:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : str = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
SCREAMING_SNAKE_CASE__ : int = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(a_ ):
SCREAMING_SNAKE_CASE__ : Dict = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
SCREAMING_SNAKE_CASE__ : List[Any] = 'Hello'
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ , header_text=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processor(
a_ , return_tensors='pt' , max_patches=a_ , header_text=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowercase( self : List[Any] )-> Dict:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
SCREAMING_SNAKE_CASE__ : str = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE__ : str = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processor(
a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Any = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(
a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = PixaStructImageProcessor if is_vision_available() else None
def __lowercase( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = PixaStructImageProcessingTester(self , num_channels=4 )
SCREAMING_SNAKE_CASE__ : Dict = 3
@property
def __lowercase( self : Any )-> Any:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase( self : Dict )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , 'do_normalize' ) )
self.assertTrue(hasattr(a_ , 'do_convert_rgb' ) )
def __lowercase( self : str )-> Union[str, Any]:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Dict = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processor(
a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 636 | 0 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class snake_case ( __snake_case , __snake_case ):
@register_to_config
def __init__( self : Dict , a_ : Dict , a_ : Dict = None , a_ : Tuple = None )-> Optional[Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ : int = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
SCREAMING_SNAKE_CASE__ : Dict = torch.zeros(__UpperCamelCase , __UpperCamelCase )
else:
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : Dict = torch.nn.Parameter(__UpperCamelCase )
class snake_case ( __snake_case ):
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
def __init__( self : Tuple , a_ : List[Any] , a_ : Dict , a_ : Tuple , a_ : str , a_ : Tuple , a_ : List[str] , )-> Union[str, Any]:
"""simple docstring"""
super().__init__()
self.register_modules(
vqvae=__UpperCamelCase , transformer=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , scheduler=__UpperCamelCase , learned_classifier_free_sampling_embeddings=__UpperCamelCase , )
def __lowercase( self : Any , a_ : Dict , a_ : int , a_ : Tuple )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = len(__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else 1
# get prompt text embeddings
SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer(
__UpperCamelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
SCREAMING_SNAKE_CASE__ : List[str] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
SCREAMING_SNAKE_CASE__ : Dict = text_input_ids[:, : self.tokenizer.model_max_length]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
SCREAMING_SNAKE_CASE__ : int = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__UpperCamelCase )
# duplicate text embeddings for each generation per prompt
SCREAMING_SNAKE_CASE__ : int = prompt_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
SCREAMING_SNAKE_CASE__ : str = self.learned_classifier_free_sampling_embeddings.embeddings
SCREAMING_SNAKE_CASE__ : List[Any] = negative_prompt_embeds.unsqueeze(0 ).repeat(__UpperCamelCase , 1 , 1 )
else:
SCREAMING_SNAKE_CASE__ : Dict = [''] * batch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = text_input_ids.shape[-1]
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer(
__UpperCamelCase , padding='max_length' , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors='pt' , )
SCREAMING_SNAKE_CASE__ : List[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
SCREAMING_SNAKE_CASE__ : Dict = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__UpperCamelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE__ : Dict = negative_prompt_embeds.shape[1]
SCREAMING_SNAKE_CASE__ : List[Any] = negative_prompt_embeds.repeat(1 , __UpperCamelCase , 1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE__ : str = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self : List[Any] , a_ : Optional[Any] , a_ : str = 100 , a_ : List[Any] = 5.0 , a_ : List[str] = 1.0 , a_ : List[str] = 1 , a_ : Tuple = None , a_ : Union[str, Any] = None , a_ : Any = "pil" , a_ : str = True , a_ : Tuple = None , a_ : Tuple = 1 , )-> str:
"""simple docstring"""
if isinstance(__UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE__ : Dict = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE__ : Dict = len(__UpperCamelCase )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}''' )
SCREAMING_SNAKE_CASE__ : Any = batch_size * num_images_per_prompt
SCREAMING_SNAKE_CASE__ : Dict = guidance_scale > 1.0
SCREAMING_SNAKE_CASE__ : Dict = self._encode_prompt(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(__UpperCamelCase )}.''' )
# get the initial completely masked latents unless the user supplied it
SCREAMING_SNAKE_CASE__ : str = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
SCREAMING_SNAKE_CASE__ : str = self.transformer.num_vector_embeds - 1
SCREAMING_SNAKE_CASE__ : Tuple = torch.full(__UpperCamelCase , __UpperCamelCase ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
'Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'
F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase , device=self.device )
SCREAMING_SNAKE_CASE__ : List[Any] = self.scheduler.timesteps.to(self.device )
SCREAMING_SNAKE_CASE__ : Dict = latents
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the sample if we are doing classifier free guidance
SCREAMING_SNAKE_CASE__ : List[str] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
SCREAMING_SNAKE_CASE__ : Tuple = self.transformer(__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , timestep=__UpperCamelCase ).sample
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_output.chunk(2 )
SCREAMING_SNAKE_CASE__ : Any = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(__UpperCamelCase , dim=1 , keepdim=__UpperCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = self.truncate(__UpperCamelCase , __UpperCamelCase )
# remove `log(0)`'s (`-inf`s)
SCREAMING_SNAKE_CASE__ : List[str] = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE__ : Dict = self.scheduler.step(__UpperCamelCase , timestep=__UpperCamelCase , sample=__UpperCamelCase , generator=__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = self.vqvae.config.vq_embed_dim
SCREAMING_SNAKE_CASE__ : Tuple = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
SCREAMING_SNAKE_CASE__ : int = self.vqvae.quantize.get_codebook_entry(__UpperCamelCase , shape=__UpperCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = self.vqvae.decode(__UpperCamelCase , force_not_quantize=__UpperCamelCase ).sample
SCREAMING_SNAKE_CASE__ : str = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE__ : int = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCamelCase )
def __lowercase( self : str , a_ : Optional[int] , a_ : str )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = torch.sort(__UpperCamelCase , 1 , descending=__UpperCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.exp(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
SCREAMING_SNAKE_CASE__ : Tuple = torch.full_like(keep_mask[:, 0:1, :] , __UpperCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat((all_true, keep_mask) , dim=1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = keep_mask[:, :-1, :]
SCREAMING_SNAKE_CASE__ : Tuple = keep_mask.gather(1 , indices.argsort(1 ) )
SCREAMING_SNAKE_CASE__ : Tuple = log_p_x_0.clone()
SCREAMING_SNAKE_CASE__ : List[Any] = -torch.inf # -inf = log(0)
return rv
| 702 | import heapq as hq
import math
from collections.abc import Iterator
class snake_case :
def __init__( self : str , a_ : str )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = str(id_ )
SCREAMING_SNAKE_CASE__ : Any = None
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {} # {vertex:distance}
def __lt__( self : int , a_ : Tuple )-> Union[str, Any]:
"""simple docstring"""
return self.key < other.key
def __repr__( self : Any )-> Dict:
"""simple docstring"""
return self.id
def __lowercase( self : Optional[Any] , a_ : int )-> List[str]:
"""simple docstring"""
self.neighbors.append(a_ )
def __lowercase( self : int , a_ : int , a_ : Optional[Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = weight
def _a ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : Dict ):
'''simple docstring'''
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , lowercase__ )
graph[b - 1].add_edge(graph[a - 1] , lowercase__ )
def _a ( lowercase__ : list , lowercase__ : Vertex ):
'''simple docstring'''
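# Prim's algorithm, O(V^2) list-based variant: repeatedly extract the vertex with the smallest key and relax its neighbours.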
SCREAMING_SNAKE_CASE__ : Any = []
for u in graph:
SCREAMING_SNAKE_CASE__ : Dict = math.inf
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : List[str] = 0
SCREAMING_SNAKE_CASE__ : int = graph[:]
while q:
SCREAMING_SNAKE_CASE__ : Optional[Any] = min(lowercase__ )
q.remove(lowercase__ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
SCREAMING_SNAKE_CASE__ : int = u
SCREAMING_SNAKE_CASE__ : Any = u.edges[v.id]
for i in range(1 , len(lowercase__ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def _a ( lowercase__ : list , lowercase__ : Vertex ):
'''simple docstring'''
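# Heap-based Prim's algorithm: same relaxation as above, but the frontier is kept in a binary heap that is re-heapified after key updates.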
for u in graph:
SCREAMING_SNAKE_CASE__ : List[str] = math.inf
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
SCREAMING_SNAKE_CASE__ : Tuple = list(lowercase__ )
hq.heapify(lowercase__ )
while h:
SCREAMING_SNAKE_CASE__ : Optional[int] = hq.heappop(lowercase__ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
SCREAMING_SNAKE_CASE__ : List[str] = u
SCREAMING_SNAKE_CASE__ : Dict = u.edges[v.id]
hq.heapify(lowercase__ )
for i in range(1 , len(lowercase__ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def _a ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 636 | 0 |
from __future__ import annotations
import math
def _a ( lowercase__ : List[Any] ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _a ( lowercase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = str(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Dict = [n]
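# Collect every truncation of n: dropping leading digits (str_num[i:]) and dropping trailing digits (str_num[:-i]).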
for i in range(1 , len(lowerCAmelCase_ ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def _a ( lowercase__ : Optional[Any] ):
'''simple docstring'''
if len(str(lowerCAmelCase_ ) ) > 3:
if not is_prime(int(str(lowerCAmelCase_ )[-3:] ) ) or not is_prime(int(str(lowerCAmelCase_ )[:3] ) ):
return False
return True
def _a ( lowercase__ : Any = 11 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : list[int] = []
SCREAMING_SNAKE_CASE__ : str = 13
while len(lowerCAmelCase_ ) != count:
if validate(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ : Dict = list_truncated_nums(lowerCAmelCase_ )
if all(is_prime(lowerCAmelCase_ ) for i in list_nums ):
list_truncated_primes.append(lowerCAmelCase_ )
num += 2
return list_truncated_primes
def _a ( ):
'''simple docstring'''
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(11)) = }""")
| 703 | def _a ( lowercase__ : int , lowercase__ : int ):
'''simple docstring'''
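# NAND returns 0 only when both inputs are 1; equivalently, it returns 1 whenever at least one input is 0.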
return int((input_a, input_a).count(0 ) != 0 )
def _a ( ):
'''simple docstring'''
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 636 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class snake_case ( unittest.TestCase ):
def __init__( self : Optional[int] , a_ : Union[str, Any] , a_ : str=7 , a_ : List[str]=3 , a_ : Optional[int]=18 , a_ : Union[str, Any]=30 , a_ : int=400 , a_ : Dict=True , a_ : Dict=None , a_ : Optional[int]=True , a_ : Optional[int]=None , a_ : List[Any]=True , )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else {"""shortest_edge""": 20}
SCREAMING_SNAKE_CASE__ : List[Any] = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE__ : Any = batch_size
SCREAMING_SNAKE_CASE__ : Tuple = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = image_size
SCREAMING_SNAKE_CASE__ : int = min_resolution
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_resolution
SCREAMING_SNAKE_CASE__ : int = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size
SCREAMING_SNAKE_CASE__ : List[Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : str = crop_size
SCREAMING_SNAKE_CASE__ : Dict = do_flip_channel_order
def __lowercase( self : List[str] )-> Optional[Any]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class snake_case ( _UpperCAmelCase , unittest.TestCase ):
lowercase_ = MobileViTImageProcessor if is_vision_available() else None
def __lowercase( self : Any )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = MobileViTImageProcessingTester(self )
@property
def __lowercase( self : str )-> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase( self : Optional[int] )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase__ , 'do_resize' ) )
self.assertTrue(hasattr(lowercase__ , 'size' ) )
self.assertTrue(hasattr(lowercase__ , 'do_center_crop' ) )
self.assertTrue(hasattr(lowercase__ , 'center_crop' ) )
self.assertTrue(hasattr(lowercase__ , 'do_flip_channel_order' ) )
def __lowercase( self : List[str] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def __lowercase( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
pass
def __lowercase( self : Optional[int] )-> str:
"""simple docstring"""
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processing(lowercase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __lowercase( self : List[str] )-> Any:
"""simple docstring"""
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , numpify=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Dict = image_processing(lowercase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __lowercase( self : Union[str, Any] )-> str:
"""simple docstring"""
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , torchify=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processing(lowercase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 704 | from math import factorial, radians
def _a ( lowercase__ : float , lowercase__ : int = 18 , lowercase__ : int = 10 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
SCREAMING_SNAKE_CASE__ : int = radians(lowercase__ )
SCREAMING_SNAKE_CASE__ : List[Any] = angle_in_radians
SCREAMING_SNAKE_CASE__ : Optional[int] = 3
SCREAMING_SNAKE_CASE__ : Optional[int] = -1
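# Maclaurin series: sin(x) = x - x^3/3! + x^5/5! - ... ; the loop adds the requested number of further terms after the leading x term.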
for _ in range(lowercase__ ):
result += (b * (angle_in_radians**a)) / factorial(lowercase__ )
SCREAMING_SNAKE_CASE__ : Any = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowercase__ , lowercase__ )
if __name__ == "__main__":
__import__("doctest").testmod()
| 636 | 0 |
from maths.prime_factors import prime_factors
def _a ( lowercase__ : int ):
'''simple docstring'''
if not isinstance(a_ , a_ ):
__A : Tuple = f'''Input value of [number={number}] must be an integer'''
raise TypeError(a_ )
if number < 1:
raise ValueError('Input must be a positive integer' )
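# Liouville's lambda: -1 if the number of prime factors (counted with multiplicity) is odd, +1 if it is even.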
return -1 if len(prime_factors(a_ ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705 | import math
def _a ( lowercase__ : int ):
'''simple docstring'''
assert isinstance(lowercase__ , lowercase__ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
SCREAMING_SNAKE_CASE__ : Tuple = range(3 , int(math.sqrt(lowercase__ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def _a ( lowercase__ : List[str] , lowercase__ : Any=1 , **lowercase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = factor * value
SCREAMING_SNAKE_CASE__ : Dict = value
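# Walk upwards (or downwards when desc=True) from factor * value until a prime is found; restart above the starting point if the search wraps back onto it.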
while not is_prime(lowercase__ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **lowercase__ )
return value
| 636 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : int = {
"""asapp/sew-tiny-100k""": """https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json""",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class snake_case ( UpperCamelCase_ ):
lowercase_ = "sew"
def __init__( self : Any , a_ : Dict=32 , a_ : Optional[Any]=768 , a_ : List[Any]=12 , a_ : Any=12 , a_ : str=3072 , a_ : Tuple=2 , a_ : List[Any]="gelu" , a_ : Optional[Any]=0.1 , a_ : Optional[Any]=0.1 , a_ : Optional[int]=0.1 , a_ : Any=0.0 , a_ : str=0.1 , a_ : Optional[int]=0.1 , a_ : Dict=0.02 , a_ : List[str]=1e-5 , a_ : List[Any]="group" , a_ : List[str]="gelu" , a_ : Optional[int]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , a_ : Dict=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , a_ : Optional[int]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , a_ : Tuple=False , a_ : int=128 , a_ : Any=16 , a_ : List[Any]=True , a_ : List[Any]=0.05 , a_ : List[str]=10 , a_ : Optional[Any]=2 , a_ : Union[str, Any]=0.0 , a_ : Dict=10 , a_ : Tuple=0 , a_ : List[Any]="mean" , a_ : List[Any]=False , a_ : str=False , a_ : Tuple=256 , a_ : int=0 , a_ : str=1 , a_ : List[str]=2 , **a_ : Optional[int] , )-> Tuple:
"""simple docstring"""
super().__init__(**a_ , pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ )
SCREAMING_SNAKE_CASE__ : Dict = hidden_size
SCREAMING_SNAKE_CASE__ : List[Any] = feat_extract_norm
SCREAMING_SNAKE_CASE__ : Dict = feat_extract_activation
SCREAMING_SNAKE_CASE__ : Any = list(a_ )
SCREAMING_SNAKE_CASE__ : Dict = list(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = list(a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = conv_bias
SCREAMING_SNAKE_CASE__ : int = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE__ : Any = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE__ : int = len(self.conv_dim )
SCREAMING_SNAKE_CASE__ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Dict = intermediate_size
SCREAMING_SNAKE_CASE__ : Dict = squeeze_factor
SCREAMING_SNAKE_CASE__ : str = hidden_act
SCREAMING_SNAKE_CASE__ : Dict = num_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_dropout
SCREAMING_SNAKE_CASE__ : str = attention_dropout
SCREAMING_SNAKE_CASE__ : List[Any] = activation_dropout
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_proj_dropout
SCREAMING_SNAKE_CASE__ : int = final_dropout
SCREAMING_SNAKE_CASE__ : List[Any] = layerdrop
SCREAMING_SNAKE_CASE__ : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Tuple = initializer_range
SCREAMING_SNAKE_CASE__ : Any = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. '
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE__ : str = apply_spec_augment
SCREAMING_SNAKE_CASE__ : int = mask_time_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] = mask_time_length
SCREAMING_SNAKE_CASE__ : Dict = mask_time_min_masks
SCREAMING_SNAKE_CASE__ : Any = mask_feature_prob
SCREAMING_SNAKE_CASE__ : List[str] = mask_feature_length
SCREAMING_SNAKE_CASE__ : Optional[int] = mask_feature_min_masks
# ctc loss
SCREAMING_SNAKE_CASE__ : List[Any] = ctc_loss_reduction
SCREAMING_SNAKE_CASE__ : int = ctc_zero_infinity
# sequence classification
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_weighted_layer_sum
SCREAMING_SNAKE_CASE__ : Optional[Any] = classifier_proj_size
@property
def __lowercase( self : Optional[Any] )-> str:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 706 | import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class snake_case :
def __init__( self : str , a_ : List[str] , a_ : Tuple=13 , a_ : Dict=30 , a_ : Optional[int]=2 , a_ : Tuple=3 , a_ : Dict=True , a_ : int=True , a_ : Optional[Any]=32 , a_ : List[str]=5 , a_ : Any=4 , a_ : Dict=37 , a_ : Dict="gelu" , a_ : int=0.1 , a_ : Optional[Any]=0.1 , a_ : Any=10 , a_ : List[str]=0.02 , a_ : Any=3 , a_ : List[str]=None , a_ : Optional[int]=2 , )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = parent
SCREAMING_SNAKE_CASE__ : int = batch_size
SCREAMING_SNAKE_CASE__ : int = image_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = num_channels
SCREAMING_SNAKE_CASE__ : int = is_training
SCREAMING_SNAKE_CASE__ : List[Any] = use_labels
SCREAMING_SNAKE_CASE__ : str = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : str = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = scope
SCREAMING_SNAKE_CASE__ : str = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
SCREAMING_SNAKE_CASE__ : Optional[int] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_patches + 2
def __lowercase( self : Optional[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __lowercase( self : Optional[Any] )-> Tuple:
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowercase( self : List[str] , a_ : List[str] , a_ : Optional[Any] , a_ : str )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = DeiTModel(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase( self : List[Any] , a_ : List[str] , a_ : List[str] , a_ : List[Any] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = DeiTForMaskedImageModeling(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ : Optional[int] = 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DeiTForMaskedImageModeling(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : int = model(a_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowercase( self : List[str] , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Tuple )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Tuple = DeiTForImageClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ : Any = 1
SCREAMING_SNAKE_CASE__ : int = DeiTForImageClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowercase( self : int )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) ,
) : List[Any] = config_and_inputs
SCREAMING_SNAKE_CASE__ : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowercase_ = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowercase_ = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
def __lowercase( self : List[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = DeiTModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def __lowercase( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def __lowercase( self : List[Any] )-> Dict:
"""simple docstring"""
pass
def __lowercase( self : str )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , nn.Linear ) )
def __lowercase( self : str )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[str] = model_class(a_ )
SCREAMING_SNAKE_CASE__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : int = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a_ )
def __lowercase( self : List[Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def __lowercase( self : List[Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a_ )
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
def __lowercase( self : str , a_ : str , a_ : Tuple , a_ : Union[str, Any]=False )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __lowercase( self : Optional[Any] )-> Any:
"""simple docstring"""
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(a_ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE__ : Tuple = model_class(a_ )
model.to(a_ )
model.train()
SCREAMING_SNAKE_CASE__ : List[Any] = self._prepare_for_class(a_ , a_ , return_labels=a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**a_ ).loss
loss.backward()
def __lowercase( self : Optional[int] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : Tuple = True
for model_class in self.all_model_classes:
if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ )
model.gradient_checkpointing_enable()
model.to(a_ )
model.train()
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(**a_ ).loss
loss.backward()
def __lowercase( self : Optional[Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[str] = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(a_ ),
*get_values(a_ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ):
SCREAMING_SNAKE_CASE__ : int = problem_type['title']
SCREAMING_SNAKE_CASE__ : Tuple = problem_type['num_labels']
SCREAMING_SNAKE_CASE__ : str = model_class(a_ )
model.to(a_ )
model.train()
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
SCREAMING_SNAKE_CASE__ : Any = inputs['labels'].to(problem_type['dtype'] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=a_ ) as warning_list:
SCREAMING_SNAKE_CASE__ : str = model(**a_ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def __lowercase( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = DeiTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def __lowercase( self : int )-> Dict:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def __lowercase( self : Any )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to(
a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(images=a_ , return_tensors='pt' ).to(a_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(**a_ )
# verify the logits
SCREAMING_SNAKE_CASE__ : int = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __lowercase( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = DeiTModel.from_pretrained(
'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto' )
SCREAMING_SNAKE_CASE__ : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(images=a_ , return_tensors='pt' )
SCREAMING_SNAKE_CASE__ : str = inputs.pixel_values.to(a_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ )
| 636 | 0 |
import string
def _a ( lowercase__ : Tuple ):
'''simple docstring'''
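# Atbash maps each letter to its mirror in the alphabet (A<->Z, B<->Y, ...).
# For uppercase ASCII codes 65-90 the mirror is chr(155 - code); for lowercase codes 97-122 it is
# chr(219 - code); any other character is passed through unchanged.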
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ""
for i in sequence:
SCREAMING_SNAKE_CASE__ : List[str] = ord(lowercase_ )
if 65 <= extract <= 90:
output += chr(1_55 - extract )
elif 97 <= extract <= 1_22:
output += chr(2_19 - extract )
else:
output += i
return output
def _a ( lowercase__ : Any ):
'''simple docstring'''
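# Faster variant: build the reversed alphabet once and substitute each letter by index lookup
# instead of per-character ASCII arithmetic.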
SCREAMING_SNAKE_CASE__ : Union[str, Any] = string.ascii_letters
SCREAMING_SNAKE_CASE__ : str = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(lowercase_ )] if c in letters else c for c in sequence )
def _a ( ):
'''simple docstring'''
from timeit import timeit
print('Running performance benchmarks...' )
SCREAMING_SNAKE_CASE__ : List[Any] = "from string import printable ; from __main__ import atbash, atbash_slow"
print(f'''> atbash_slow(): {timeit('atbash_slow(printable)' , setup=lowercase_ )} seconds''' )
print(f'''> atbash(): {timeit('atbash(printable)' , setup=lowercase_ )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
| 707 | import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
def __init__( self : List[Any] , a_ : Dict , a_ : Any=13 , a_ : Any=7 , a_ : Tuple=True , a_ : Tuple=True , a_ : Optional[int]=False , a_ : Dict=True , a_ : Optional[Any]=99 , a_ : Any=32 , a_ : Dict=5 , a_ : Tuple=4 , a_ : List[str]=37 , a_ : Union[str, Any]="gelu" , a_ : Dict=0.1 , a_ : Tuple=0.1 , a_ : List[str]=512 , a_ : List[str]=16 , a_ : List[str]=2 , a_ : Optional[int]=0.02 , a_ : List[str]=3 , a_ : Union[str, Any]=4 , a_ : Optional[Any]=None , )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = parent
SCREAMING_SNAKE_CASE__ : Dict = batch_size
SCREAMING_SNAKE_CASE__ : Dict = seq_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_input_mask
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE__ : int = use_labels
SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Dict = intermediate_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Tuple = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = num_labels
SCREAMING_SNAKE_CASE__ : Dict = num_choices
SCREAMING_SNAKE_CASE__ : str = scope
def __lowercase( self : Tuple )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Tuple = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : str = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase( self : Dict )-> Tuple:
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a_ , initializer_range=self.initializer_range , )
def __lowercase( self : Any , a_ : str , a_ : Tuple , a_ : Dict , a_ : Optional[int] , a_ : List[Any] , a_ : Union[str, Any] , a_ : Tuple )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BioGptModel(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , attention_mask=a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase( self : List[Any] , a_ : Union[str, Any] , a_ : Optional[int] , a_ : Tuple , a_ : Optional[Any] , a_ : int , a_ : Optional[int] , a_ : int , a_ : str , a_ : Optional[Any] , )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = BioGptForCausalLM(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase( self : Tuple , a_ : Optional[int] , a_ : Union[str, Any] , a_ : Any , a_ : Any , a_ : Optional[int] , *a_ : Tuple )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = BioGptModel(config=a_ )
model.to(a_ )
model.eval()
# create attention mask
SCREAMING_SNAKE_CASE__ : Any = torch.ones(input_ids.shape , dtype=torch.long , device=a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.seq_length // 2
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
# first forward pass
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , attention_mask=a_ ).to_tuple()
# create a hypothetical next token and extend it to next_input_ids
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
SCREAMING_SNAKE_CASE__ : str = ids_tensor((1,) , a_ ).item() + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = random_other_next_tokens
# append to next input_ids and attn_mask
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Dict = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=a_ )] , dim=1 , )
# get two different outputs
SCREAMING_SNAKE_CASE__ : str = model(a_ , attention_mask=a_ )['last_hidden_state']
SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , past_key_values=a_ , attention_mask=a_ )['last_hidden_state']
# select random slice
SCREAMING_SNAKE_CASE__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : List[str] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1e-3 ) )
def __lowercase( self : str , a_ : List[Any] , a_ : str , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Optional[Any] , *a_ : List[str] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = BioGptModel(config=a_ ).to(a_ ).eval()
SCREAMING_SNAKE_CASE__ : Dict = torch.ones(input_ids.shape , dtype=torch.long , device=a_ )
# first forward pass
SCREAMING_SNAKE_CASE__ : Any = model(a_ , attention_mask=a_ , use_cache=a_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = outputs.to_tuple()
# create hypothetical multiple next tokens and extend them to next_input_ids
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and attention mask
SCREAMING_SNAKE_CASE__ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
SCREAMING_SNAKE_CASE__ : int = model(a_ , attention_mask=a_ )['last_hidden_state']
SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , attention_mask=a_ , past_key_values=a_ )[
'last_hidden_state'
]
# select random slice
SCREAMING_SNAKE_CASE__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1e-3 ) )
def __lowercase( self : Any , a_ : List[str] , a_ : Optional[int] , a_ : Any , a_ : Tuple , a_ : Any , *a_ : List[Any] , a_ : Union[str, Any]=False )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = BioGptForCausalLM(a_ )
model.to(a_ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
SCREAMING_SNAKE_CASE__ : Tuple = model(a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def __lowercase( self : Union[str, Any] , a_ : List[str] , *a_ : Optional[int] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = BioGptModel(a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
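# Every output-projection ("c_proj") weight is expected to have a standard deviation close to
# initializer_range / sqrt(2 * num_hidden_layers) and a mean close to zero.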
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def __lowercase( self : Dict , a_ : Tuple , a_ : Tuple , a_ : List[str] , a_ : Any , a_ : str , *a_ : str )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.num_labels
SCREAMING_SNAKE_CASE__ : str = BioGptForTokenClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ , attention_mask=a_ , token_type_ids=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowercase( self : Any )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) ,
) : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowercase_ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
lowercase_ = (BioGptForCausalLM,) if is_torch_available() else ()
lowercase_ = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ = False
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = BioGptModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 )
def __lowercase( self : Tuple )-> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowercase( self : Optional[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def __lowercase( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE__ : List[str] = type
self.model_tester.create_and_check_model(*a_ )
def __lowercase( self : int )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*a_ )
def __lowercase( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*a_ , gradient_checkpointing=a_ )
def __lowercase( self : Union[str, Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*a_ )
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*a_ )
def __lowercase( self : str )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*a_ )
@slow
def __lowercase( self : List[str] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(a_ )
SCREAMING_SNAKE_CASE__ : Dict = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
SCREAMING_SNAKE_CASE__ : List[str] = 'left'
# Define PAD Token = EOS Token
SCREAMING_SNAKE_CASE__ : Any = tokenizer.eos_token
SCREAMING_SNAKE_CASE__ : Tuple = model.config.eos_token_id
# use different length sentences to test batching
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
'Hello, my dog is a little',
'Today, I',
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(a_ , return_tensors='pt' , padding=a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = inputs['input_ids'].to(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = model.generate(
input_ids=a_ , attention_mask=inputs['attention_mask'].to(a_ ) , )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(a_ )
SCREAMING_SNAKE_CASE__ : Dict = model.generate(input_ids=a_ )
SCREAMING_SNAKE_CASE__ : Tuple = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
SCREAMING_SNAKE_CASE__ : Dict = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.generate(input_ids=a_ , max_length=model.config.max_length - num_paddings )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.decode(output_non_padded[0] , skip_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = [
'Hello, my dog is a little bit bigger than a little bit.',
'Today, I have a good idea of how to use the information',
]
self.assertListEqual(a_ , a_ )
self.assertListEqual(a_ , [non_padded_sentence, padded_sentence] )
@slow
def __lowercase( self : Any )-> List[Any]:
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : List[Any] = BioGptModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def __lowercase( self : Optional[int] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[Any] = 3
SCREAMING_SNAKE_CASE__ : List[Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_ids.ne(1 ).to(a_ )
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : int = BioGptForSequenceClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(a_ , attention_mask=a_ , labels=a_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : str = 3
SCREAMING_SNAKE_CASE__ : Any = 'multi_label_classification'
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_dict['input_ids']
SCREAMING_SNAKE_CASE__ : Any = input_ids.ne(1 ).to(a_ )
SCREAMING_SNAKE_CASE__ : str = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE__ : Dict = BioGptForSequenceClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = model(a_ , attention_mask=a_ , labels=a_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def __lowercase( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([[2, 4805, 9, 656, 21]] )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ )[0]
SCREAMING_SNAKE_CASE__ : List[str] = 4_2384
SCREAMING_SNAKE_CASE__ : Dict = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , a_ )
SCREAMING_SNAKE_CASE__ : int = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=1e-4 ) )
@slow
def __lowercase( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
SCREAMING_SNAKE_CASE__ : Dict = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(a_ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer('COVID-19 is' , return_tensors='pt' ).to(a_ )
SCREAMING_SNAKE_CASE__ : int = model.generate(
**a_ , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=a_ , )
SCREAMING_SNAKE_CASE__ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
self.assertEqual(a_ , a_ )
| 636 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.