code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
_lowerCamelCase =logging.get_logger(__name__)
def _a ( lowerCamelCase, lowerCamelCase ):
try:
with open(lowerCamelCase, """rb""" ) as flax_state_f:
lowerCamelCase : Optional[Any] = from_bytes(lowerCamelCase, flax_state_f.read() )
except UnpicklingError as e:
try:
with open(lowerCamelCase ) as f:
if f.read().startswith("""version""" ):
raise OSError(
"""You seem to have cloned a repository without having git-lfs installed. Please"""
""" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
""" folder you cloned.""" )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F'''Unable to convert {model_file} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(lowerCamelCase, lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
lowerCamelCase : Tuple = flatten_dict(jax.tree_util.tree_map(lambda lowerCamelCase : x.dtype == jnp.bfloataa, lowerCamelCase ) ).values()
if any(lowerCamelCase ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
lowerCamelCase : Union[str, Any] = jax.tree_util.tree_map(
lambda lowerCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params, lowerCamelCase )
lowerCamelCase : Any = """"""
lowerCamelCase : Tuple = flatten_dict(lowerCamelCase, sep=""".""" )
lowerCamelCase : Optional[Any] = pt_model.state_dict()
# keep track of unexpected & missing keys
lowerCamelCase : List[Any] = []
lowerCamelCase : Any = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowerCamelCase : List[str] = flax_key_tuple.split(""".""" )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
lowerCamelCase : int = flax_key_tuple_array[:-1] + ["""weight"""]
lowerCamelCase : Union[str, Any] = jnp.transpose(lowerCamelCase, (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
lowerCamelCase : Tuple = flax_key_tuple_array[:-1] + ["""weight"""]
lowerCamelCase : str = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
lowerCamelCase : Optional[Any] = flax_key_tuple_array[:-1] + ["""weight"""]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(lowerCamelCase ):
lowerCamelCase : Union[str, Any] = (
flax_key_tuple_string.replace("""_0""", """.0""" )
.replace("""_1""", """.1""" )
.replace("""_2""", """.2""" )
.replace("""_3""", """.3""" )
.replace("""_4""", """.4""" )
.replace("""_5""", """.5""" )
.replace("""_6""", """.6""" )
.replace("""_7""", """.7""" )
.replace("""_8""", """.8""" )
.replace("""_9""", """.9""" )
)
lowerCamelCase : int = """.""".join(lowerCamelCase )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
lowerCamelCase : int = np.asarray(lowerCamelCase ) if not isinstance(lowerCamelCase, np.ndarray ) else flax_tensor
lowerCamelCase : Tuple = torch.from_numpy(lowerCamelCase )
# remove from missing keys
missing_keys.remove(lowerCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(lowerCamelCase )
pt_model.load_state_dict(lowerCamelCase )
# re-transform missing_keys to list
lowerCamelCase : Any = list(lowerCamelCase )
if len(lowerCamelCase ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
if len(lowerCamelCase ) > 0:
logger.warning(
F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
""" use it for predictions and inference.""" )
return pt_model
| 681
|
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : List[str] = k_size // 2
lowerCamelCase , lowerCamelCase : Optional[int] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
lowerCamelCase : Optional[Any] = 1 / (2 * pi * sigma) * exp(-(square(lowerCamelCase ) + square(lowerCamelCase )) / (2 * square(lowerCamelCase )) )
return g
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = image.shape[0], image.shape[1]
# dst image height and width
lowerCamelCase : Dict = height - k_size + 1
lowerCamelCase : str = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
lowerCamelCase : Tuple = zeros((dst_height * dst_width, k_size * k_size) )
lowerCamelCase : List[Any] = 0
for i, j in product(range(lowerCamelCase ), range(lowerCamelCase ) ):
lowerCamelCase : Dict = ravel(image[i : i + k_size, j : j + k_size] )
lowerCamelCase : Union[str, Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
lowerCamelCase : Dict = gen_gaussian_kernel(lowerCamelCase, lowerCamelCase )
lowerCamelCase : str = ravel(lowerCamelCase )
# reshape and get the dst image
lowerCamelCase : List[str] = dot(lowerCamelCase, lowerCamelCase ).reshape(lowerCamelCase, lowerCamelCase ).astype(lowerCamelCase )
return dst
if __name__ == "__main__":
# read original image
_lowerCamelCase =imread(R"""../image_data/lena.jpg""")
# turn image in gray scale value
_lowerCamelCase =cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
_lowerCamelCase =gaussian_filter(gray, 3, sigma=1)
_lowerCamelCase =gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("""gaussian filter with 3x3 mask""", gaussianaxa)
imshow("""gaussian filter with 5x5 mask""", gaussianaxa)
waitKey()
| 681
| 1
|
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
class __lowerCAmelCase ( UpperCAmelCase_):
def __init__( self: str , *_lowerCAmelCase: Union[str, Any] , **_lowerCAmelCase: str ):
warnings.warn(
"The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DonutImageProcessor instead." , _snake_case , )
super().__init__(*_snake_case , **_snake_case )
| 708
|
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def UpperCAmelCase__ ( ):
lowercase :List[str] = torch.nn.Linear(2, 4 )
lowercase :List[Any] = torch.optim.AdamW(model.parameters(), lr=1.0 )
lowercase :int = torch.optim.lr_scheduler.OneCycleLR(lowerCamelCase, max_lr=0.01, steps_per_epoch=2, epochs=1 )
lowercase :Optional[int] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
lowercase :List[str] = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def UpperCAmelCase__ ( lowerCamelCase ):
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def UpperCAmelCase__ ( lowerCamelCase ):
lowercase :Dict = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(lowerCamelCase )
class __lowerCAmelCase ( lowerCAmelCase):
@require_cuda
def SCREAMING_SNAKE_CASE ( self: str ):
lowercase :List[str] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(_lowerCAmelCase ):
lowercase :Any = Accelerator(cpu=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Tuple ):
lowercase :Any = Accelerator()
lowercase :Dict = GradientState()
assert state.num_steps == 1
lowercase :Tuple = 4
assert state.num_steps == 4
assert state.sync_gradients is True
lowercase :List[Any] = False
assert state.sync_gradients is False
GradientState._reset_state()
def SCREAMING_SNAKE_CASE ( self: int ):
lowercase :str = Accelerator()
lowercase , lowercase , lowercase , lowercase , lowercase :Any = create_components()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) :Tuple = accelerator.prepare(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def SCREAMING_SNAKE_CASE ( self: Any ):
lowercase :Dict = Accelerator()
lowercase , lowercase , lowercase , lowercase , lowercase :List[str] = create_components()
accelerator.prepare(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def SCREAMING_SNAKE_CASE ( self: int ):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*_lowerCAmelCase: List[str] , **_lowerCAmelCase: Optional[int] ):
pass
with patch("torch.cuda.set_device" , _lowerCAmelCase ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ):
lowercase :List[Any] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , "cuda:64" )
def SCREAMING_SNAKE_CASE ( self: int ):
lowercase :Tuple = Accelerator()
lowercase , lowercase , lowercase , lowercase , lowercase :Optional[Any] = create_components()
accelerator.prepare(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
lowercase :Tuple = get_signature(_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_lowerCAmelCase )
# make sure random weights don't match
load_random_weights(_lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(_lowerCAmelCase ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(_lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(_lowerCAmelCase ) ) < 1e-3 )
def SCREAMING_SNAKE_CASE ( self: Optional[int] ):
lowercase :Dict = Accelerator()
lowercase , lowercase , lowercase , lowercase , lowercase :int = create_components()
accelerator.prepare(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
lowercase :int = get_signature(_lowerCAmelCase )
# saving hook
def save_config(_lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: str , _lowerCAmelCase: List[Any] ):
lowercase :Dict = {"class_name": models[0].__class__.__name__}
with open(os.path.join(_lowerCAmelCase , "data.json" ) , "w" ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
# loading hook
def load_config(_lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: Union[str, Any] ):
with open(os.path.join(_lowerCAmelCase , "data.json" ) , "r" ) as f:
lowercase :int = json.load(_lowerCAmelCase )
lowercase :Optional[int] = config["class_name"]
lowercase :Optional[Any] = accelerator.register_save_state_pre_hook(_lowerCAmelCase )
lowercase :Tuple = accelerator.register_load_state_pre_hook(_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_lowerCAmelCase )
# make sure random weights don't match with hooks
load_random_weights(_lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(_lowerCAmelCase ) ) > 1e-3 )
# random class name to verify correct one is loaded
lowercase :Optional[int] = "random"
# make sure loaded weights match with hooks
accelerator.load_state(_lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(_lowerCAmelCase ) ) < 1e-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_lowerCAmelCase )
# make sure random weights don't match with hooks removed
load_random_weights(_lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(_lowerCAmelCase ) ) > 1e-3 )
# random class name to verify correct one is loaded
lowercase :str = "random"
# make sure loaded weights match with hooks removed
accelerator.load_state(_lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(_lowerCAmelCase ) ) < 1e-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def SCREAMING_SNAKE_CASE ( self: Optional[int] ):
lowercase :List[str] = Accelerator()
lowercase , lowercase , lowercase , lowercase , lowercase :List[Any] = create_components()
lowercase :List[str] = None
# This should work
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase :str = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
self.assertTrue(dummy_obj is None )
def SCREAMING_SNAKE_CASE ( self: List[str] ):
lowercase :Union[str, Any] = Accelerator()
lowercase , lowercase , lowercase , lowercase , lowercase :Union[str, Any] = create_components()
lowercase :Union[str, Any] = [1, 2, 3]
# This should work
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase :List[str] = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(
getattr(_lowerCAmelCase , "_is_accelerate_prepared" , _lowerCAmelCase ) , _lowerCAmelCase , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
self.assertEqual(
getattr(_lowerCAmelCase , "_is_accelerate_prepared" , _lowerCAmelCase ) , _lowerCAmelCase , "Model is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(_lowerCAmelCase , "_is_accelerate_prepared" , _lowerCAmelCase ) , _lowerCAmelCase , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(_lowerCAmelCase , "_is_accelerate_prepared" , _lowerCAmelCase ) , _lowerCAmelCase , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(_lowerCAmelCase , "_is_accelerate_prepared" , _lowerCAmelCase ) , _lowerCAmelCase , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(_lowerCAmelCase , "_is_accelerate_prepared" , _lowerCAmelCase ) , _lowerCAmelCase , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
from transformers import AutoModelForCausalLM
lowercase :Any = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=_lowerCAmelCase , device_map={"": 0} , )
lowercase :int = Accelerator()
# This should work
lowercase :Optional[int] = accelerator.prepare(_lowerCAmelCase )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE ( self: List[str] ):
from transformers import AutoModelForCausalLM
lowercase :Optional[Any] = Accelerator()
with init_empty_weights():
lowercase :Any = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
lowercase :Dict = infer_auto_device_map(_lowerCAmelCase )
lowercase :int = "cpu"
lowercase :str = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , device_map=_lowerCAmelCase , load_in_abit=_lowerCAmelCase , llm_inta_enable_fpaa_cpu_offload=_lowerCAmelCase )
# This should not work and get value error
with self.assertRaises(_lowerCAmelCase ):
lowercase :Dict = accelerator.prepare(_lowerCAmelCase )
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE ( self: Any ):
from transformers import AutoModelForCausalLM
lowercase :List[str] = {"distributed_type": DistributedType.MULTI_GPU}
with init_empty_weights():
lowercase :str = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
lowercase :Optional[Any] = infer_auto_device_map(_lowerCAmelCase )
lowercase :Any = 1
lowercase :str = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=_lowerCAmelCase , device_map=_lowerCAmelCase , )
lowercase :List[str] = Accelerator()
# This should not work and get value error
with self.assertRaises(_lowerCAmelCase ):
lowercase :Optional[Any] = accelerator.prepare(_lowerCAmelCase )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE ( self: List[Any] ):
from transformers import AutoModelForCausalLM
with init_empty_weights():
lowercase :List[str] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
lowercase :List[Any] = infer_auto_device_map(_lowerCAmelCase )
lowercase :Optional[int] = 1
lowercase :Union[str, Any] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=_lowerCAmelCase , device_map=_lowerCAmelCase , )
lowercase :Union[str, Any] = Accelerator()
# This should work
lowercase :List[Any] = accelerator.prepare(_lowerCAmelCase )
@require_cuda
def SCREAMING_SNAKE_CASE ( self: Any ):
lowercase :Optional[int] = torch.nn.Linear(10 , 10 )
lowercase :Optional[int] = torch.optim.SGD(model.parameters() , lr=0.01 )
lowercase :List[Any] = Accelerator(cpu=_lowerCAmelCase )
lowercase :List[str] = accelerator.prepare(_lowerCAmelCase )
| 453
| 0
|
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = 'T5Config'
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ ) -> jnp.ndarray:
__UpperCAmelCase =jnp.zeros_like(snake_case__ )
__UpperCAmelCase =shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
__UpperCAmelCase =shifted_input_ids.at[:, 0].set(snake_case__ )
__UpperCAmelCase =jnp.where(shifted_input_ids == -100 , snake_case__ , snake_case__ )
return shifted_input_ids
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
a_ : Tuple = '''mt5'''
a_ : Dict = MTaConfig
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
a_ : Tuple = '''mt5'''
a_ : List[Any] = MTaConfig
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
a_ : Tuple = '''mt5'''
a_ : List[str] = MTaConfig
| 132
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
a_ : int = '''deta'''
a_ : Dict = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__(self , UpperCAmelCase=None , UpperCAmelCase=9_0_0 , UpperCAmelCase=2_0_4_8 , UpperCAmelCase=6 , UpperCAmelCase=2_0_4_8 , UpperCAmelCase=8 , UpperCAmelCase=6 , UpperCAmelCase=1_0_2_4 , UpperCAmelCase=8 , UpperCAmelCase=0.0 , UpperCAmelCase=True , UpperCAmelCase="relu" , UpperCAmelCase=2_5_6 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.02 , UpperCAmelCase=1.0 , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase="sine" , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=4 , UpperCAmelCase=True , UpperCAmelCase=3_0_0 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=1 , UpperCAmelCase=5 , UpperCAmelCase=2 , UpperCAmelCase=1 , UpperCAmelCase=1 , UpperCAmelCase=5 , UpperCAmelCase=2 , UpperCAmelCase=0.1 , UpperCAmelCase=0.25 , **UpperCAmelCase , ):
'''simple docstring'''
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
__UpperCAmelCase =CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''])
else:
if isinstance(UpperCAmelCase , UpperCAmelCase):
__UpperCAmelCase =backbone_config.pop('''model_type''')
__UpperCAmelCase =CONFIG_MAPPING[backbone_model_type]
__UpperCAmelCase =config_class.from_dict(UpperCAmelCase)
__UpperCAmelCase =backbone_config
__UpperCAmelCase =num_queries
__UpperCAmelCase =max_position_embeddings
__UpperCAmelCase =d_model
__UpperCAmelCase =encoder_ffn_dim
__UpperCAmelCase =encoder_layers
__UpperCAmelCase =encoder_attention_heads
__UpperCAmelCase =decoder_ffn_dim
__UpperCAmelCase =decoder_layers
__UpperCAmelCase =decoder_attention_heads
__UpperCAmelCase =dropout
__UpperCAmelCase =attention_dropout
__UpperCAmelCase =activation_dropout
__UpperCAmelCase =activation_function
__UpperCAmelCase =init_std
__UpperCAmelCase =init_xavier_std
__UpperCAmelCase =encoder_layerdrop
__UpperCAmelCase =auxiliary_loss
__UpperCAmelCase =position_embedding_type
# deformable attributes
__UpperCAmelCase =num_feature_levels
__UpperCAmelCase =encoder_n_points
__UpperCAmelCase =decoder_n_points
__UpperCAmelCase =two_stage
__UpperCAmelCase =two_stage_num_proposals
__UpperCAmelCase =with_box_refine
__UpperCAmelCase =assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''')
# Hungarian matcher
__UpperCAmelCase =class_cost
__UpperCAmelCase =bbox_cost
__UpperCAmelCase =giou_cost
# Loss coefficients
__UpperCAmelCase =mask_loss_coefficient
__UpperCAmelCase =dice_loss_coefficient
__UpperCAmelCase =bbox_loss_coefficient
__UpperCAmelCase =giou_loss_coefficient
__UpperCAmelCase =eos_coefficient
__UpperCAmelCase =focal_alpha
super().__init__(is_encoder_decoder=UpperCAmelCase , **UpperCAmelCase)
@property
def A__ (self):
'''simple docstring'''
return self.encoder_attention_heads
@property
def A__ (self):
'''simple docstring'''
return self.d_model
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =copy.deepcopy(self.__dict__)
__UpperCAmelCase =self.backbone_config.to_dict()
__UpperCAmelCase =self.__class__.model_type
return output
| 132
| 1
|
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
lowerCAmelCase = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def __SCREAMING_SNAKE_CASE ( lowercase_ = "mumbai" ) -> str:
'''simple docstring'''
__UpperCAmelCase : int = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
__UpperCAmelCase : int = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
__UpperCAmelCase : Union[str, Any] = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
| 720
|
from typing import TYPE_CHECKING
from ....utils import _LazyModule
lowerCAmelCase = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 675
| 0
|
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
a_ :List[str] =None
a_ :Tuple =BloomTokenizerFast
a_ :List[str] =BloomTokenizerFast
a_ :Dict =True
a_ :Optional[Any] =False
a_ :List[Any] ="""tokenizer_file"""
a_ :Optional[Any] ={"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""}
def __a ( self : Tuple ):
'''simple docstring'''
super().setUp()
__a = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self : str , **SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def __a ( self : str ):
'''simple docstring'''
__a = self.get_rust_tokenizer()
__a = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
__a = [[2_1_7_5, 2_3_7_1_4, 7_3_1_7_3, 1_4_4_2_5_2, 2], [7_7, 1_3_2_6_1_9, 3_4_7_8, 3_6_8, 1_0_9_5_8_6, 3_5_4_3_3, 2]]
__a = tokenizer.batch_encode_plus(SCREAMING_SNAKE_CASE__ )["""input_ids"""]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str=6 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__a = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
__a = """This is a simple input"""
__a = ["""This is a simple input 1""", """This is a simple input 2"""]
__a = ("""This is a simple input""", """This is a pair""")
__a = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
try:
tokenizer_r.encode(SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
tokenizer_r.batch_encode_plus(SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
tokenizer_r.encode(SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
tokenizer_r.batch_encode_plus(SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
except ValueError:
self.fail("""Bloom Tokenizer should be able to deal with padding""" )
__a = None # Hotfixing padding = None
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" , )
def __a ( self : List[Any] ):
'''simple docstring'''
__a = self.get_rust_tokenizer()
__a = load_dataset("""xnli""" , """all_languages""" , split="""test""" , streaming=SCREAMING_SNAKE_CASE__ )
__a = next(iter(SCREAMING_SNAKE_CASE__ ) )["""premise"""] # pick up one data
__a = list(sample_data.values() )
__a = list(map(tokenizer.encode , SCREAMING_SNAKE_CASE__ ) )
__a = [tokenizer.decode(SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ ) for x in output_tokens]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __a ( self : Dict ):
'''simple docstring'''
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 582
|
'''simple docstring'''
def __lowercase ( __SCREAMING_SNAKE_CASE = 10**9 ) -> int:
"""simple docstring"""
__a = 1
__a = 2
__a = 0
__a = 0
__a = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
__a = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 582
| 1
|
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowerCamelCase__ = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
return image
elif isinstance(_SCREAMING_SNAKE_CASE , PIL.Image.Image ):
__a = [image]
__a = [trans(img.convert("""RGB""" ) ) for img in image]
__a = torch.stack(_SCREAMING_SNAKE_CASE )
return image
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def __init__( self : List[str] , __lowercase : List[Any] , __lowercase : List[str] ):
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
__a = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__lowercase , scheduler=__lowercase )
def UpperCamelCase_ ( self : Any , __lowercase : int ):
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(F"The value of strength should in [0.0, 1.0] but is {strength}" )
def UpperCamelCase_ ( self : Tuple , __lowercase : Optional[int] , __lowercase : Any , __lowercase : int ):
'''simple docstring'''
# get the original timestep using init_timestep
__a = min(int(num_inference_steps * strength ) , __lowercase )
__a = max(num_inference_steps - init_timestep , 0 )
__a = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCamelCase_ ( self : str , __lowercase : Union[str, Any] , __lowercase : Any , __lowercase : Tuple , __lowercase : List[str] , __lowercase : Tuple , __lowercase : List[str]=None ):
'''simple docstring'''
if not isinstance(__lowercase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__lowercase )}" )
__a = image.to(device=__lowercase , dtype=__lowercase )
if isinstance(__lowercase , __lowercase ) and len(__lowercase ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(__lowercase )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
__a = init_latents.shape
__a = randn_tensor(__lowercase , generator=__lowercase , device=__lowercase , dtype=__lowercase )
# get latents
print("""add noise to latents at timestep""" , __lowercase )
__a = self.scheduler.add_noise(__lowercase , __lowercase , __lowercase )
__a = init_latents
return latents
@torch.no_grad()
def __call__( self : str , __lowercase : Union[torch.FloatTensor, PIL.Image.Image] = None , __lowercase : float = 0.8 , __lowercase : int = 1 , __lowercase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowercase : float = 0.0 , __lowercase : int = 50 , __lowercase : Optional[bool] = None , __lowercase : Optional[str] = "pil" , __lowercase : bool = True , ):
'''simple docstring'''
self.check_inputs(__lowercase )
# 2. Preprocess image
__a = preprocess(__lowercase )
# 3. set timesteps
self.scheduler.set_timesteps(__lowercase , device=self.device )
__a , __a = self.get_timesteps(__lowercase , __lowercase , self.device )
__a = timesteps[:1].repeat(__lowercase )
# 4. Prepare latent variables
__a = self.prepare_latents(__lowercase , __lowercase , __lowercase , self.unet.dtype , self.device , __lowercase )
__a = latents
# 5. Denoising loop
for t in self.progress_bar(__lowercase ):
# 1. predict noise model_output
__a = self.unet(__lowercase , __lowercase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__a = self.scheduler.step(
__lowercase , __lowercase , __lowercase , eta=__lowercase , use_clipped_model_output=__lowercase , generator=__lowercase , ).prev_sample
__a = (image / 2 + 0.5).clamp(0 , 1 )
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(__lowercase )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=__lowercase )
| 547
|
from __future__ import annotations
from collections import Counter
from random import random
class SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] ):
'''simple docstring'''
__a = {}
def UpperCamelCase_ ( self : Dict , __lowercase : str ):
'''simple docstring'''
__a = {}
def UpperCamelCase_ ( self : List[str] , __lowercase : str , __lowercase : str , __lowercase : float ):
'''simple docstring'''
if nodea not in self.connections:
self.add_node(__lowercase )
if nodea not in self.connections:
self.add_node(__lowercase )
__a = probability
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return list(self.connections )
def UpperCamelCase_ ( self : Tuple , __lowercase : str ):
'''simple docstring'''
__a = 0
__a = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : list[tuple[str, str, float]] , _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
__a = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a = Counter(graph.get_nodes() )
__a = start
for _ in range(_SCREAMING_SNAKE_CASE ):
__a = graph.transition(_SCREAMING_SNAKE_CASE )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 547
| 1
|
from graphs.minimum_spanning_tree_kruskal import kruskal
def _snake_case () -> List[str]:
_lowercase =9
_lowercase =[
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_lowercase =kruskal(snake_case__ , snake_case__)
_lowercase =[
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(snake_case__) == sorted(snake_case__)
| 181
|
"""simple docstring"""
from __future__ import annotations
A_ = 10
def UpperCAmelCase__ (snake_case__ : list[int] ):
"""simple docstring"""
_snake_case : Any = 1
_snake_case : Dict = max(snake_case__ )
while placement <= max_digit:
# declare and initialize empty buckets
_snake_case : list[list] = [[] for _ in range(snake_case__ )]
# split list_of_ints between the buckets
for i in list_of_ints:
_snake_case : List[str] = int((i / placement) % RADIX )
buckets[tmp].append(snake_case__ )
# put each buckets' contents into list_of_ints
_snake_case : Dict = 0
for b in range(snake_case__ ):
for i in buckets[b]:
_snake_case : str = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 609
| 0
|
from __future__ import annotations
from math import pi
def UpperCamelCase ( snake_case__ : float ,snake_case__ : float ,snake_case__ : float ):
'''simple docstring'''
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if inductance < 0:
raise ValueError("""Inductance cannot be negative""" )
if frequency < 0:
raise ValueError("""Frequency cannot be negative""" )
if reactance < 0:
raise ValueError("""Inductive reactance cannot be negative""" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 291
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class snake_case__ ( lowercase_):
'''simple docstring'''
lowerCamelCase : Optional[int] = "visual_bert"
def __init__( self , a__=3_05_22 , a__=7_68 , a__=5_12 , a__=12 , a__=12 , a__=30_72 , a__="gelu" , a__=0.1 , a__=0.1 , a__=5_12 , a__=2 , a__=0.02 , a__=1e-12 , a__=False , a__=True , a__=1 , a__=0 , a__=2 , **a__ , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , **a__ )
__snake_case :Tuple = vocab_size
__snake_case :Any = max_position_embeddings
__snake_case :int = hidden_size
__snake_case :List[str] = visual_embedding_dim
__snake_case :List[str] = num_hidden_layers
__snake_case :Tuple = num_attention_heads
__snake_case :str = intermediate_size
__snake_case :Union[str, Any] = hidden_act
__snake_case :Union[str, Any] = hidden_dropout_prob
__snake_case :str = attention_probs_dropout_prob
__snake_case :Tuple = initializer_range
__snake_case :str = type_vocab_size
__snake_case :Optional[Any] = layer_norm_eps
__snake_case :Union[str, Any] = bypass_transformer
__snake_case :Tuple = special_visual_initialize
| 291
| 1
|
from __future__ import annotations
import time
UpperCAmelCase_ = list[tuple[int, int]]
UpperCAmelCase_ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
UpperCAmelCase_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class __magic_name__ :
"""simple docstring"""
def __init__( self : Optional[int] , _lowercase : int , _lowercase : int , _lowercase : int , _lowercase : int , _lowercase : Node | None ):
"""simple docstring"""
_UpperCamelCase: Dict = pos_x
_UpperCamelCase: Dict = pos_y
_UpperCamelCase: Tuple = (pos_y, pos_x)
_UpperCamelCase: Dict = goal_x
_UpperCamelCase: Optional[Any] = goal_y
_UpperCamelCase: str = parent
class __magic_name__ :
"""simple docstring"""
def __init__( self : str , _lowercase : tuple[int, int] , _lowercase : tuple[int, int] ):
"""simple docstring"""
_UpperCamelCase: int = Node(start[1] , start[0] , goal[1] , goal[0] , _lowercase )
_UpperCamelCase: Optional[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , _lowercase )
_UpperCamelCase: Dict = [self.start]
_UpperCamelCase: Optional[int] = False
def lowerCAmelCase ( self : int ):
"""simple docstring"""
while self.node_queue:
_UpperCamelCase: Dict = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
_UpperCamelCase: Dict = True
return self.retrace_path(_lowercase )
_UpperCamelCase: Optional[Any] = self.get_successors(_lowercase )
for node in successors:
self.node_queue.append(_lowercase )
if not self.reached:
return [self.start.pos]
return None
def lowerCAmelCase ( self : List[Any] , _lowercase : Node ):
"""simple docstring"""
_UpperCamelCase: Union[str, Any] = []
for action in delta:
_UpperCamelCase: Dict = parent.pos_x + action[1]
_UpperCamelCase: List[Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_lowercase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(_lowercase , _lowercase , self.target.pos_y , self.target.pos_x , _lowercase ) )
return successors
def lowerCAmelCase ( self : Optional[int] , _lowercase : Node | None ):
"""simple docstring"""
_UpperCamelCase: Optional[Any] = node
_UpperCamelCase: List[str] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_UpperCamelCase: Optional[Any] = current_node.parent
path.reverse()
return path
class __magic_name__ :
"""simple docstring"""
def __init__( self : Optional[Any] , _lowercase : Tuple , _lowercase : List[str] ):
"""simple docstring"""
_UpperCamelCase: Any = BreadthFirstSearch(_lowercase , _lowercase )
_UpperCamelCase: Any = BreadthFirstSearch(_lowercase , _lowercase )
_UpperCamelCase: List[str] = False
def lowerCAmelCase ( self : Dict ):
"""simple docstring"""
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
_UpperCamelCase: Optional[Any] = self.fwd_bfs.node_queue.pop(0 )
_UpperCamelCase: Optional[int] = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
_UpperCamelCase: str = True
return self.retrace_bidirectional_path(
_lowercase , _lowercase )
_UpperCamelCase: Dict = current_bwd_node
_UpperCamelCase: Optional[int] = current_fwd_node
_UpperCamelCase: Union[str, Any] = {
self.fwd_bfs: self.fwd_bfs.get_successors(_lowercase ),
self.bwd_bfs: self.bwd_bfs.get_successors(_lowercase ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(_lowercase )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def lowerCAmelCase ( self : str , _lowercase : Node , _lowercase : Node ):
"""simple docstring"""
_UpperCamelCase: Any = self.fwd_bfs.retrace_path(_lowercase )
_UpperCamelCase: Any = self.bwd_bfs.retrace_path(_lowercase )
bwd_path.pop()
bwd_path.reverse()
_UpperCamelCase: List[Any] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
UpperCAmelCase_ = (0, 0)
UpperCAmelCase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
UpperCAmelCase_ = time.time()
UpperCAmelCase_ = BreadthFirstSearch(init, goal)
UpperCAmelCase_ = bfs.search()
UpperCAmelCase_ = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
UpperCAmelCase_ = time.time()
UpperCAmelCase_ = BidirectionalBreadthFirstSearch(init, goal)
UpperCAmelCase_ = bd_bfs.search()
UpperCAmelCase_ = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 271
|
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
UpperCAmelCase_ = get_tests_dir('''fixtures/dummy-config.json''')
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : str ):
"""simple docstring"""
_UpperCamelCase: Dict = 0
def lowerCAmelCase ( self : Any ):
"""simple docstring"""
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto''' ) )
def lowerCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_UpperCamelCase: List[str] = AutoConfig.from_pretrained('''bert-base-uncased''' )
self.assertIsInstance(_lowercase , _lowercase )
def lowerCAmelCase ( self : int ):
"""simple docstring"""
_UpperCamelCase: int = AutoConfig.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def lowerCAmelCase ( self : List[Any] ):
"""simple docstring"""
_UpperCamelCase: List[Any] = AutoConfig.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def lowerCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_UpperCamelCase: List[str] = AutoConfig.for_model('''roberta''' )
self.assertIsInstance(_lowercase , _lowercase )
def lowerCAmelCase ( self : List[str] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
_UpperCamelCase: Union[str, Any] = os.path.join(_lowercase , '''fake-roberta''' )
os.makedirs(_lowercase , exist_ok=_lowercase )
with open(os.path.join(_lowercase , '''config.json''' ) , '''w''' ) as f:
f.write(json.dumps({} ) )
_UpperCamelCase: Union[str, Any] = AutoConfig.from_pretrained(_lowercase )
self.assertEqual(type(_lowercase ) , _lowercase )
def lowerCAmelCase ( self : Dict ):
"""simple docstring"""
try:
AutoConfig.register('''custom''' , _lowercase )
# Wrong model type will raise an error
with self.assertRaises(_lowercase ):
AutoConfig.register('''model''' , _lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowercase ):
AutoConfig.register('''bert''' , _lowercase )
# Now that the config is registered, it can be used as any other config with the auto-API
_UpperCamelCase: Union[str, Any] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_lowercase )
_UpperCamelCase: Any = AutoConfig.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def lowerCAmelCase ( self : Optional[int] ):
"""simple docstring"""
with self.assertRaisesRegex(
_lowercase , '''bert-base is not a local folder and is not a valid model identifier''' ):
_UpperCamelCase: Optional[int] = AutoConfig.from_pretrained('''bert-base''' )
def lowerCAmelCase ( self : Tuple ):
"""simple docstring"""
with self.assertRaisesRegex(
_lowercase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
_UpperCamelCase: str = AutoConfig.from_pretrained(_lowercase , revision='''aaaaaa''' )
def lowerCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
with self.assertRaisesRegex(
_lowercase , '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''' , ):
_UpperCamelCase: int = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''' )
def lowerCAmelCase ( self : Dict ):
"""simple docstring"""
with self.assertRaises(_lowercase ):
_UpperCamelCase: List[str] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowercase ):
_UpperCamelCase: Optional[int] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_lowercase )
_UpperCamelCase: Union[str, Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_lowercase )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_lowercase )
_UpperCamelCase: str = AutoConfig.from_pretrained(_lowercase , trust_remote_code=_lowercase )
self.assertEqual(reloaded_config.__class__.__name__ , '''NewModelConfig''' )
def lowerCAmelCase ( self : Any ):
"""simple docstring"""
class __magic_name__ ( __a ):
"""simple docstring"""
lowerCAmelCase : List[Any] = '''new-model'''
try:
AutoConfig.register('''new-model''' , _lowercase )
# If remote code is not set, the default is to use local
_UpperCamelCase: List[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote code is disabled, we load the local one.
_UpperCamelCase: Union[str, Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_lowercase )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote is enabled, we load from the Hub
_UpperCamelCase: Any = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_lowercase )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 271
| 1
|
'''simple docstring'''
class _a :
def __init__( self : Dict , lowercase : Any , lowercase : Tuple ):
'''simple docstring'''
UpperCAmelCase = name
UpperCAmelCase = val
def __str__( self : List[str] ):
'''simple docstring'''
return f"{self.__class__.__name__}({self.name}, {self.val})"
def __lt__( self : Dict , lowercase : Any ):
'''simple docstring'''
return self.val < other.val
class _a :
def __init__( self : List[Any] , lowercase : Tuple ):
'''simple docstring'''
UpperCAmelCase = {}
UpperCAmelCase = {}
UpperCAmelCase = self.build_heap(lowercase )
def __getitem__( self : Tuple , lowercase : int ):
'''simple docstring'''
return self.get_value(lowercase )
def A ( self : Optional[Any] , lowercase : List[str] ):
'''simple docstring'''
return (idx - 1) // 2
def A ( self : int , lowercase : List[str] ):
'''simple docstring'''
return idx * 2 + 1
def A ( self : Tuple , lowercase : Dict ):
'''simple docstring'''
return idx * 2 + 2
def A ( self : List[str] , lowercase : Optional[Any] ):
'''simple docstring'''
return self.heap_dict[key]
def A ( self : Union[str, Any] , lowercase : List[str] ):
'''simple docstring'''
UpperCAmelCase = len(lowercase ) - 1
UpperCAmelCase = self.get_parent_idx(lowercase )
for idx, i in enumerate(lowercase ):
UpperCAmelCase = idx
UpperCAmelCase = i.val
for i in range(lowercase , -1 , -1 ):
self.sift_down(lowercase , lowercase )
return array
def A ( self : int , lowercase : Optional[int] , lowercase : List[Any] ):
'''simple docstring'''
while True:
UpperCAmelCase = self.get_left_child_idx(lowercase ) # noqa: E741
UpperCAmelCase = self.get_right_child_idx(lowercase )
UpperCAmelCase = idx
if l < len(lowercase ) and array[l] < array[idx]:
UpperCAmelCase = l
if r < len(lowercase ) and array[r] < array[smallest]:
UpperCAmelCase = r
if smallest != idx:
UpperCAmelCase , UpperCAmelCase = array[smallest], array[idx]
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
UpperCAmelCase = smallest
else:
break
def A ( self : int , lowercase : int ):
'''simple docstring'''
UpperCAmelCase = self.get_parent_idx(lowercase )
while p >= 0 and self.heap[p] > self.heap[idx]:
UpperCAmelCase , UpperCAmelCase = self.heap[idx], self.heap[p]
UpperCAmelCase , UpperCAmelCase = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
UpperCAmelCase = p
UpperCAmelCase = self.get_parent_idx(lowercase )
def A ( self : int ):
'''simple docstring'''
return self.heap[0]
def A ( self : int ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = self.heap[-1], self.heap[0]
UpperCAmelCase , UpperCAmelCase = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
UpperCAmelCase = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def A ( self : Optional[int] , lowercase : Dict ):
'''simple docstring'''
self.heap.append(lowercase )
UpperCAmelCase = len(self.heap ) - 1
UpperCAmelCase = node.val
self.sift_up(len(self.heap ) - 1 )
def A ( self : Any ):
'''simple docstring'''
return len(self.heap ) == 0
def A ( self : Any , lowercase : Tuple , lowercase : str ):
'''simple docstring'''
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
UpperCAmelCase = new_value
UpperCAmelCase = new_value
self.sift_up(self.idx_of_element[node] )
A =Node('R', -1)
A =Node('B', 6)
A =Node('A', 3)
A =Node('X', 1)
A =Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
A =MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 358
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _a :
def __init__( self : Optional[Any] , lowercase : int , lowercase : str=13 , lowercase : int=7 , lowercase : Optional[int]=True , lowercase : Dict=True , lowercase : Union[str, Any]=True , lowercase : Union[str, Any]=99 , lowercase : Optional[Any]=32 , lowercase : List[Any]=5 , lowercase : List[Any]=4 , lowercase : int=37 , lowercase : int="gelu" , lowercase : Tuple=0.1 , lowercase : Dict=0.1 , lowercase : Tuple=512 , lowercase : List[str]=16 , lowercase : Optional[int]=2 , lowercase : int=0.02 , lowercase : str=3 , lowercase : List[Any]=4 , lowercase : List[str]=None , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
UpperCAmelCase = self.vocab_size - 1
def A ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def A ( self : Any , lowercase : Tuple , lowercase : List[Any] , lowercase : Optional[Any] , lowercase : List[str] , *lowercase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = OpenAIGPTModel(config=lowercase )
model.to(lowercase )
model.eval()
UpperCAmelCase = model(lowercase , token_type_ids=lowercase , head_mask=lowercase )
UpperCAmelCase = model(lowercase , token_type_ids=lowercase )
UpperCAmelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[str] , lowercase : Optional[int] , lowercase : List[Any] , lowercase : str , lowercase : Optional[int] , *lowercase : List[str] ):
'''simple docstring'''
UpperCAmelCase = OpenAIGPTLMHeadModel(lowercase )
model.to(lowercase )
model.eval()
UpperCAmelCase = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : Optional[int] , lowercase : Tuple , lowercase : Tuple , lowercase : Optional[int] , lowercase : Optional[int] , *lowercase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = OpenAIGPTDoubleHeadsModel(lowercase )
model.to(lowercase )
model.eval()
UpperCAmelCase = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : Tuple , lowercase : Dict , lowercase : Any , lowercase : Any , lowercase : Optional[int] , *lowercase : int ):
'''simple docstring'''
UpperCAmelCase = self.num_labels
UpperCAmelCase = OpenAIGPTForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase = self.prepare_config_and_inputs()
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) = config_and_inputs
UpperCAmelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class _a ( __a , __a , __a , unittest.TestCase ):
__a : List[str] = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
__a : List[Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
__a : Dict = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def A ( self : Optional[Any] , lowercase : Union[str, Any] , lowercase : Tuple , lowercase : int , lowercase : List[Any] , lowercase : Optional[Any] ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def A ( self : List[Any] , lowercase : int , lowercase : List[Any] , lowercase : Optional[Any]=False ):
'''simple docstring'''
UpperCAmelCase = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowercase , )
UpperCAmelCase = inputs_dict['''labels''']
UpperCAmelCase = inputs_dict['''labels''']
UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowercase , )
UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
def A ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase = OpenAIGPTModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowercase , n_embd=37 )
def A ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowercase )
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowercase )
def A ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowercase )
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowercase )
@slow
def A ( self : List[Any] ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = OpenAIGPTModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_torch
class _a ( unittest.TestCase ):
@slow
def A ( self : Any ):
'''simple docstring'''
UpperCAmelCase = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(lowercase )
UpperCAmelCase = torch.tensor([[481, 4_735, 544]] , dtype=torch.long , device=lowercase ) # the president is
UpperCAmelCase = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
UpperCAmelCase = model.generate(lowercase , do_sample=lowercase )
self.assertListEqual(output_ids[0].tolist() , lowercase )
| 358
| 1
|
'''simple docstring'''
lowerCAmelCase = """Alexander Joslin"""
import operator as op
from .stack import Stack
def lowerCamelCase_ ( __UpperCamelCase : str ) -> int:
"""simple docstring"""
_A = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
_A = Stack()
_A = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(__UpperCamelCase ) )
elif i in operators:
# RULE 2
operator_stack.push(__UpperCamelCase )
elif i == ")":
# RULE 4
_A = operator_stack.peek()
operator_stack.pop()
_A = operand_stack.peek()
operand_stack.pop()
_A = operand_stack.peek()
operand_stack.pop()
_A = operators[opr](__UpperCamelCase , __UpperCamelCase )
operand_stack.push(__UpperCamelCase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCAmelCase = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'{equation} = {dijkstras_two_stack_algorithm(equation)}')
| 292
|
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase_ ( __UpperCamelCase : dict , __UpperCamelCase : str ) -> set[str]:
"""simple docstring"""
_A , _A = set(__UpperCamelCase ), [start]
while stack:
_A = stack.pop()
explored.add(__UpperCamelCase )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(__UpperCamelCase )
return explored
lowerCAmelCase = {
"""A""": ["""B""", """C""", """D"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F"""],
"""D""": ["""B""", """D"""],
"""E""": ["""B""", """F"""],
"""F""": ["""C""", """E""", """G"""],
"""G""": ["""F"""],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, """A"""))
| 292
| 1
|
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Optional[int] , __A : int ):
snake_case__ : Any = metric_id
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a_ = [MetricMock(_snake_case ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]
def _lowercase ( self : List[str] ):
return self._metrics
monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
"func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Any , snake_case_ : Dict ):
if "tmp_path" in args:
snake_case__ : List[str] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
with pytest.warns(__A , match="https://huggingface.co/docs/evaluate" ):
func(*__A )
| 701
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , __A : List[str] , __A : Union[str, Any]=7 , __A : Any=3 , __A : Optional[Any]=3_0 , __A : List[str]=4_0_0 , __A : str=True , __A : Optional[Any]=None , __A : Optional[int]=True , __A : int=[0.5, 0.5, 0.5] , __A : Dict=[0.5, 0.5, 0.5] , __A : Optional[int]=True , __A : int=1 / 2_5_5 , __A : List[str]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
snake_case__ : Optional[Any] = parent
snake_case__ : str = batch_size
snake_case__ : Union[str, Any] = num_channels
snake_case__ : Optional[Any] = min_resolution
snake_case__ : List[str] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : str = size
snake_case__ : str = do_normalize
snake_case__ : Optional[Any] = image_mean
snake_case__ : List[str] = image_std
snake_case__ : List[str] = do_rescale
snake_case__ : Tuple = rescale_factor
snake_case__ : Tuple = do_pad
def _lowercase ( self : str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowercase ( self : Optional[Any] , __A : List[Any] , __A : List[Any]=False ):
if not batched:
snake_case__ : List[Any] = image_inputs[0]
if isinstance(__A , Image.Image ):
snake_case__, snake_case__ : str = image.size
else:
snake_case__, snake_case__ : Dict = image.shape[1], image.shape[2]
if w < h:
snake_case__ : Any = int(self.size["shortest_edge"] * h / w )
snake_case__ : Any = self.size["shortest_edge"]
elif w > h:
snake_case__ : Optional[int] = self.size["shortest_edge"]
snake_case__ : Any = int(self.size["shortest_edge"] * w / h )
else:
snake_case__ : Tuple = self.size["shortest_edge"]
snake_case__ : int = self.size["shortest_edge"]
else:
snake_case__ : Any = []
for image in image_inputs:
snake_case__, snake_case__ : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ : List[Any] = max(__A , key=lambda __A : item[0] )[0]
snake_case__ : int = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = DeformableDetrImageProcessor if is_vision_available() else None
def _lowercase ( self : str ):
snake_case__ : Optional[Any] = DeformableDetrImageProcessingTester(self )
@property
def _lowercase ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Tuple ):
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "image_mean" ) )
self.assertTrue(hasattr(__A , "image_std" ) )
self.assertTrue(hasattr(__A , "do_normalize" ) )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "do_rescale" ) )
self.assertTrue(hasattr(__A , "do_pad" ) )
self.assertTrue(hasattr(__A , "size" ) )
def _lowercase ( self : Any ):
snake_case__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __A )
snake_case__ : Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , __A )
def _lowercase ( self : str ):
pass
def _lowercase ( self : List[str] ):
# Initialize image_processing
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : List[str] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__, snake_case__ : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : int ):
# Initialize image_processing
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
snake_case__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Union[str, Any] ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
snake_case__ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowercase ( self : Optional[Any] ):
# prepare image and target
snake_case__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case__ : Tuple = json.loads(f.read() )
snake_case__ : Union[str, Any] = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
snake_case__ : str = DeformableDetrImageProcessor()
snake_case__ : Tuple = image_processing(images=__A , annotations=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : str = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : Any = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : int = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify orig_size
snake_case__ : List[str] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : Tuple = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
@slow
def _lowercase ( self : Optional[int] ):
# prepare image, target and masks_path
snake_case__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case__ : Any = json.loads(f.read() )
snake_case__ : Dict = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
snake_case__ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case__ : List[str] = DeformableDetrImageProcessor(format="coco_panoptic" )
snake_case__ : List[Any] = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : List[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Optional[int] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Any = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : Any = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : List[str] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify masks
snake_case__ : Union[str, Any] = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A )
# verify orig_size
snake_case__ : int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : Union[str, Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
| 25
| 0
|
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class lowercase ( lowerCAmelCase_ , unittest.TestCase ):
__a = WavaVecaPhonemeCTCTokenizer
__a = False
def lowercase_ ( self ):
"""simple docstring"""
super().setUp()
lowerCAmelCase__ : Optional[Any] = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''' )
lowerCAmelCase__ : Dict = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
lowerCAmelCase__ : int = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''}
lowerCAmelCase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + '''\n''' )
def lowercase_ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=20 , SCREAMING_SNAKE_CASE__=5 ):
"""simple docstring"""
lowerCAmelCase__ : int = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=__lowerCAmelCase )) for i in range(len(__lowerCAmelCase ) )]
lowerCAmelCase__ : Optional[Any] = list(filter(lambda SCREAMING_SNAKE_CASE__ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=__lowerCAmelCase ) , __lowerCAmelCase ) )
if max_length is not None and len(__lowerCAmelCase ) > max_length:
lowerCAmelCase__ : Union[str, Any] = toks[:max_length]
if min_length is not None and len(__lowerCAmelCase ) < min_length and len(__lowerCAmelCase ) > 0:
while len(__lowerCAmelCase ) < min_length:
lowerCAmelCase__ : Optional[Any] = toks + toks
# toks_str = [t[1] for t in toks]
lowerCAmelCase__ : List[str] = [t[0] for t in toks]
# Ensure consistency
lowerCAmelCase__ : Any = tokenizer.decode(__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
if " " not in output_txt and len(__lowerCAmelCase ) > 1:
lowerCAmelCase__ : Dict = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowerCAmelCase )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowerCAmelCase )
)
if with_prefix_space:
lowerCAmelCase__ : Any = ''' ''' + output_txt
lowerCAmelCase__ : Union[str, Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
return output_txt, output_ids
def lowercase_ ( self , **SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
# check adding a single token
tokenizer.add_tokens('''xxx''' )
lowerCAmelCase__ : List[Any] = tokenizer('''m xxx ɪ''' , do_phonemize=__lowerCAmelCase ).input_ids
self.assertEqual(__lowerCAmelCase , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] )
lowerCAmelCase__ : Dict = tokenizer('''m aaa ɪ ccc''' , do_phonemize=__lowerCAmelCase ).input_ids
self.assertEqual(__lowerCAmelCase , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
lowerCAmelCase__ : List[str] = tokenizer('''maɪ c''' , do_phonemize=__lowerCAmelCase ).input_ids
self.assertEqual(__lowerCAmelCase , [3, 200] ) # mai should be <unk> (=3)
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowerCAmelCase__ : Dict = '''Hello how are you'''
lowerCAmelCase__ : Optional[Any] = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang='''en-us''' )
self.assertEqual(__lowerCAmelCase , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Dict = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowerCAmelCase__ : List[Any] = '''Hello how are you'''
lowerCAmelCase__ : Tuple = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(__lowerCAmelCase ).input_ids , tokenizer(__lowerCAmelCase , do_phonemize=__lowerCAmelCase ).input_ids )
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : int = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowerCAmelCase__ : List[Any] = '''Hello how are you'''
lowerCAmelCase__ : int = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang='''en-us''' )
lowerCAmelCase__ : List[Any] = tokenizer.decode(tokenizer(__lowerCAmelCase ).input_ids )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowerCAmelCase__ : List[str] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
lowerCAmelCase__ : Any = tokenizer.decode(sample_ids[0] )
lowerCAmelCase__ : Optional[Any] = tokenizer.batch_decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , batch_tokens[0] )
self.assertEqual(__lowerCAmelCase , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
lowerCAmelCase__ : List[str] = '''Hello how are you'''
lowerCAmelCase__ : Optional[int] = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang='''en-us''' )
self.assertEqual(__lowerCAmelCase , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
lowerCAmelCase__ : Optional[int] = '''Hello how are you'''
lowerCAmelCase__ : List[Any] = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(__lowerCAmelCase ).input_ids , tokenizer(__lowerCAmelCase , do_phonemize=__lowerCAmelCase ).input_ids )
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
lowerCAmelCase__ : Optional[Any] = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
lowerCAmelCase__ : Tuple = tokenizer.decode(sample_ids[0] )
lowerCAmelCase__ : Any = tokenizer.batch_decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , batch_tokens[0] )
self.assertEqual(__lowerCAmelCase , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
# decode with no word_del_token filter
lowerCAmelCase__ : Any = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=__lowerCAmelCase )
lowerCAmelCase__ : str = tokenizer.batch_decode(__lowerCAmelCase , filter_word_delimiter_token=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , batch_tokens[0] )
self.assertEqual(__lowerCAmelCase , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] )
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
lowerCAmelCase__ : Optional[int] = '''Hello how are you'''
lowerCAmelCase__ : int = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang='''en-us''' )
lowerCAmelCase__ : List[Any] = tokenizer.decode(tokenizer(__lowerCAmelCase ).input_ids , filter_word_delimiter_token=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
lowerCAmelCase__ : Union[str, Any] = '''Hello how are you'''
lowerCAmelCase__ : Dict = tokenizer.phonemize(__lowerCAmelCase , phonemizer_lang='''en-us''' )
lowerCAmelCase__ : Tuple = tokenizer.decode(tokenizer(__lowerCAmelCase ).input_ids , filter_word_delimiter_token=__lowerCAmelCase )
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , __lowerCAmelCase )
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=__lowerCAmelCase )
lowerCAmelCase__ : List[Any] = '''Hello how are you'''
lowerCAmelCase__ : Optional[Any] = tokenizer(__lowerCAmelCase , phonemizer_lang='''en-us''' ).input_ids
lowerCAmelCase__ : List[Any] = tokenizer(__lowerCAmelCase , phonemizer_lang='''fr-fr''' ).input_ids
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
lowerCAmelCase__ : Dict = tokenizer.decode(__lowerCAmelCase )
lowerCAmelCase__ : Dict = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
self.assertEqual(__lowerCAmelCase , '''ɛ l o h aʊ a ʁ j u''' )
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowerCAmelCase__ : List[Any] = '''Hello how Are you'''
lowerCAmelCase__ : Dict = '''hello how are you'''
lowerCAmelCase__ : Tuple = tokenizer(__lowerCAmelCase ).input_ids
lowerCAmelCase__ : Optional[int] = tokenizer(__lowerCAmelCase ).input_ids
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
tokenizer.add_tokens(['''!''', '''?'''] )
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} )
# fmt: off
lowerCAmelCase__ : Optional[Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
lowerCAmelCase__ : int = tokenizer.batch_decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] )
@staticmethod
def lowercase_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCAmelCase__ : Dict = [d[key] for d in offsets]
return retrieved_list
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = self.get_tokenizer(word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
lowerCAmelCase__ : List[str] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
lowerCAmelCase__ : Any = tokenizer.decode(__lowerCAmelCase , output_char_offsets=__lowerCAmelCase , filter_word_delimiter_token=__lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''char_offsets''' in outputs )
self.assertTrue(isinstance(__lowerCAmelCase , __lowerCAmelCase ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = self.get_tokenizer(word_delimiter_token='''|''' )
def check_list_tuples_equal(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertTrue(isinstance(__lowerCAmelCase , __lowerCAmelCase ) )
self.assertTrue(isinstance(outputs_list[0] , __lowerCAmelCase ) )
# transform list to ModelOutput
lowerCAmelCase__ : str = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] )
def recursive_check(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
[recursive_check(__lowerCAmelCase , __lowerCAmelCase ) for la, la in zip(__lowerCAmelCase , __lowerCAmelCase )]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] )
# fmt: off
lowerCAmelCase__ : Any = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
lowerCAmelCase__ : str = tokenizer.batch_decode(__lowerCAmelCase , output_char_offsets=__lowerCAmelCase )
lowerCAmelCase__ : Any = [tokenizer.decode(__lowerCAmelCase , output_char_offsets=__lowerCAmelCase ) for ids in sample_ids]
check_list_tuples_equal(__lowerCAmelCase , __lowerCAmelCase )
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
def lowercase_ ( self ):
"""simple docstring"""
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
def lowercase_ ( self ):
"""simple docstring"""
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
def lowercase_ ( self ):
"""simple docstring"""
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
def lowercase_ ( self ):
"""simple docstring"""
pass
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : str = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCAmelCase__ : Optional[Any] = tokenizer.vocab_size
lowerCAmelCase__ : str = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
lowerCAmelCase__ : Tuple = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
lowerCAmelCase__ : Any = tokenizer.add_tokens(__lowerCAmelCase )
lowerCAmelCase__ : Optional[int] = tokenizer.vocab_size
lowerCAmelCase__ : Union[str, Any] = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase , 0 )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase , all_size + len(__lowerCAmelCase ) )
lowerCAmelCase__ : Any = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
lowerCAmelCase__ : str = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
lowerCAmelCase__ : int = tokenizer.add_special_tokens(__lowerCAmelCase )
lowerCAmelCase__ : Union[str, Any] = tokenizer.vocab_size
lowerCAmelCase__ : Tuple = len(__lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase , 0 )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , len(__lowerCAmelCase ) )
self.assertEqual(__lowerCAmelCase , all_size_a + len(__lowerCAmelCase ) )
lowerCAmelCase__ : List[str] = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__lowerCAmelCase )
self.assertGreaterEqual(len(__lowerCAmelCase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def lowercase_ ( self ):
"""simple docstring"""
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def lowercase_ ( self ):
"""simple docstring"""
pass
def lowercase_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = self.get_tokenizers(fast=__lowerCAmelCase , do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCAmelCase__ : Tuple = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
lowerCAmelCase__ : int = tokenizer.convert_tokens_to_string(__lowerCAmelCase )
self.assertIsInstance(output['''text'''] , __lowerCAmelCase )
| 233
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = len(lowercase )
for i in range(n - 1 ):
for j in range(i + 1 ,lowercase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
if len(lowercase ) <= 1:
return arr, 0
_UpperCAmelCase = len(lowercase ) // 2
_UpperCAmelCase = arr[0:mid]
_UpperCAmelCase = arr[mid:]
_UpperCAmelCase , _UpperCAmelCase = count_inversions_recursive(lowercase )
_UpperCAmelCase , _UpperCAmelCase = count_inversions_recursive(lowercase )
_UpperCAmelCase , _UpperCAmelCase = _count_cross_inversions(lowercase ,lowercase )
_UpperCAmelCase = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = _UpperCAmelCase = _UpperCAmelCase = 0
while i < len(lowercase ) and j < len(lowercase ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(lowercase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(lowercase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
_UpperCAmelCase = count_inversions_bf(lowercase )
_UpperCAmelCase , _UpperCAmelCase = count_inversions_recursive(lowercase )
assert num_inversions_bf == num_inversions_recursive == 8
print("""number of inversions = """ ,lowercase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
_UpperCAmelCase = count_inversions_bf(lowercase )
_UpperCAmelCase , _UpperCAmelCase = count_inversions_recursive(lowercase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ ,lowercase )
# an empty list should also have zero inversions
_UpperCAmelCase = []
_UpperCAmelCase = count_inversions_bf(lowercase )
_UpperCAmelCase , _UpperCAmelCase = count_inversions_recursive(lowercase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ ,lowercase )
if __name__ == "__main__":
main()
| 277
| 0
|
'''simple docstring'''
def snake_case_ ( a__ : Union[str, Any] = 10 ,a__ : List[str] = 22 ):
"""simple docstring"""
__lowercase = range(1 ,_snake_case )
__lowercase = range(1 ,_snake_case )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
| 708
|
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A : Tuple = """▁"""
A : List[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class SCREAMING_SNAKE_CASE( __A , unittest.TestCase ):
snake_case_ : Optional[int] = BertGenerationTokenizer
snake_case_ : List[str] = False
snake_case_ : Optional[int] = True
def snake_case__ ( self ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
__lowercase = BertGenerationTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self ) -> List[str]:
"""simple docstring"""
__lowercase = """<s>"""
__lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ )
def snake_case__ ( self ) -> List[Any]:
"""simple docstring"""
__lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(lowerCamelCase__ ) , 1002 )
def snake_case__ ( self ) -> Dict:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def snake_case__ ( self ) -> Optional[int]:
"""simple docstring"""
__lowercase = BertGenerationTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
__lowercase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [285, 46, 10, 170, 382] , )
__lowercase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__lowercase = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__lowercase = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def snake_case__ ( self ) -> Any:
"""simple docstring"""
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
def snake_case__ ( self ) -> Tuple:
"""simple docstring"""
__lowercase = """Hello World!"""
__lowercase = [1_8536, 2260, 101]
self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
@slow
def snake_case__ ( self ) -> Any:
"""simple docstring"""
__lowercase = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
__lowercase = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
@require_torch
@slow
def snake_case__ ( self ) -> List[Any]:
"""simple docstring"""
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
__lowercase = list(self.big_tokenizer.get_vocab().keys() )[:10]
__lowercase = """ """.join(lowerCamelCase__ )
__lowercase = self.big_tokenizer.encode_plus(lowerCamelCase__ , return_tensors="""pt""" , return_token_type_ids=lowerCamelCase__ )
__lowercase = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=lowerCamelCase__ )
__lowercase = BertGenerationConfig()
__lowercase = BertGenerationEncoder(lowerCamelCase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCamelCase__ )
model(**lowerCamelCase__ )
@slow
def snake_case__ ( self ) -> Optional[Any]:
"""simple docstring"""
__lowercase = {"""input_ids""": [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
| 163
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : int = logging.get_logger(__name__)
a_ : int = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class snake_case ( lowercase ):
"""simple docstring"""
_lowerCamelCase = "bert"
def __init__( self , UpperCamelCase=3_0522 , UpperCamelCase=768 , UpperCamelCase=12 , UpperCamelCase=12 , UpperCamelCase=3072 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=1e-12 , UpperCamelCase=0 , UpperCamelCase="absolute" , UpperCamelCase=True , UpperCamelCase=None , **UpperCamelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase )
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = hidden_act
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = position_embedding_type
lowerCamelCase_ = use_cache
lowerCamelCase_ = classifier_dropout
class snake_case ( lowercase ):
"""simple docstring"""
@property
def snake_case ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
lowerCamelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 675
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a_ : Optional[Any] = {
"""configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
"""tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : str = [
"""CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CTRLForSequenceClassification""",
"""CTRLLMHeadModel""",
"""CTRLModel""",
"""CTRLPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = [
"""TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCTRLForSequenceClassification""",
"""TFCTRLLMHeadModel""",
"""TFCTRLModel""",
"""TFCTRLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
a_ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 675
| 1
|
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class UpperCAmelCase_ ( __lowercase , __lowercase ):
lowerCamelCase : Dict = 1
@register_to_config
def __init__( self : Optional[Any] , UpperCAmelCase__ : int = 1_0_0_0 , UpperCAmelCase__ : Optional[Union[np.ndarray, List[float]]] = None ) -> str:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(UpperCAmelCase__ )
# standard deviation of the initial noise distribution
lowerCAmelCase = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
lowerCAmelCase = 4
# running values
lowerCAmelCase = []
def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, torch.device] = None ) -> int:
lowerCAmelCase = num_inference_steps
lowerCAmelCase = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
lowerCAmelCase = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
lowerCAmelCase = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
lowerCAmelCase = torch.sin(steps * math.pi / 2 ) ** 2
lowerCAmelCase = (1.0 - self.betas**2) ** 0.5
lowerCAmelCase = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
lowerCAmelCase = timesteps.to(UpperCAmelCase__ )
lowerCAmelCase = []
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : int , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : bool = True , ) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
lowerCAmelCase = (self.timesteps == timestep).nonzero().item()
lowerCAmelCase = timestep_index + 1
lowerCAmelCase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCAmelCase__ )
if len(self.ets ) == 1:
lowerCAmelCase = self.ets[-1]
elif len(self.ets ) == 2:
lowerCAmelCase = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
lowerCAmelCase = (2_3 * self.ets[-1] - 1_6 * self.ets[-2] + 5 * self.ets[-3]) / 1_2
else:
lowerCAmelCase = (1 / 2_4) * (5_5 * self.ets[-1] - 5_9 * self.ets[-2] + 3_7 * self.ets[-3] - 9 * self.ets[-4])
lowerCAmelCase = self._get_prev_sample(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : torch.FloatTensor , *UpperCAmelCase__ : str , **UpperCAmelCase__ : List[Any] ) -> torch.FloatTensor:
return sample
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : int ) -> List[str]:
lowerCAmelCase = self.alphas[timestep_index]
lowerCAmelCase = self.betas[timestep_index]
lowerCAmelCase = self.alphas[prev_timestep_index]
lowerCAmelCase = self.betas[prev_timestep_index]
lowerCAmelCase = (sample - sigma * ets) / max(UpperCAmelCase__ , 1E-8 )
lowerCAmelCase = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : int ) -> List[Any]:
return self.config.num_train_timesteps
| 513
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__snake_case ={
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case =["""ChineseCLIPFeatureExtractor"""]
__snake_case =["""ChineseCLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case =[
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__snake_case =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 513
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_a: Dict = r"""
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `\" / \"`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `\" // \"`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `\"train\"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `\"compressed\"`)
The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and
`\"compressed\"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a \"dummy\" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
"""
@add_start_docstrings(lowercase )
class __UpperCamelCase ( lowercase ):
SCREAMING_SNAKE_CASE__ = 'rag'
SCREAMING_SNAKE_CASE__ = True
def __init__( self : str , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Tuple=True , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Dict=" / " , lowerCAmelCase : Any=" // " , lowerCAmelCase : str=5 , lowerCAmelCase : Optional[int]=300 , lowerCAmelCase : List[Any]=768 , lowerCAmelCase : Optional[int]=8 , lowerCAmelCase : str="wiki_dpr" , lowerCAmelCase : Optional[Any]="train" , lowerCAmelCase : str="compressed" , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : Optional[Any]=False , lowerCAmelCase : str=0.0 , lowerCAmelCase : int=True , lowerCAmelCase : Tuple=False , lowerCAmelCase : int=False , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[str]=None , **lowerCAmelCase : Optional[Any] , ):
'''simple docstring'''
super().__init__(
bos_token_id=lowerCAmelCase , pad_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , forced_eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , prefix=lowerCAmelCase , vocab_size=lowerCAmelCase , **lowerCAmelCase , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
UpperCAmelCase_ = kwargs.pop("question_encoder" )
UpperCAmelCase_ = question_encoder_config.pop("model_type" )
UpperCAmelCase_ = kwargs.pop("generator" )
UpperCAmelCase_ = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
UpperCAmelCase_ = AutoConfig.for_model(lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase_ = AutoConfig.for_model(lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase_ = reduce_loss
UpperCAmelCase_ = label_smoothing
UpperCAmelCase_ = exclude_bos_score
UpperCAmelCase_ = do_marginalize
UpperCAmelCase_ = title_sep
UpperCAmelCase_ = doc_sep
UpperCAmelCase_ = n_docs
UpperCAmelCase_ = max_combined_length
UpperCAmelCase_ = dataset
UpperCAmelCase_ = dataset_split
UpperCAmelCase_ = index_name
UpperCAmelCase_ = retrieval_vector_size
UpperCAmelCase_ = retrieval_batch_size
UpperCAmelCase_ = passages_path
UpperCAmelCase_ = index_path
UpperCAmelCase_ = use_dummy_dataset
UpperCAmelCase_ = output_retrieved
UpperCAmelCase_ = do_deduplication
UpperCAmelCase_ = use_cache
if self.forced_eos_token_id is None:
UpperCAmelCase_ = getattr(self.generator , "forced_eos_token_id" , lowerCAmelCase )
@classmethod
def __A ( cls : Any , lowerCAmelCase : PretrainedConfig , lowerCAmelCase : PretrainedConfig , **lowerCAmelCase : str ):
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **lowerCAmelCase )
def __A ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ = self.question_encoder.to_dict()
UpperCAmelCase_ = self.generator.to_dict()
UpperCAmelCase_ = self.__class__.model_type
return output
| 162
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_a: Union[str, Any] = logging.get_logger(__name__)
_a: Dict = {"""tokenizer_file""": """tokenizer.json"""}
_a: Dict = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class __UpperCamelCase ( lowercase ):
SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ = ['input_ids', 'attention_mask']
SCREAMING_SNAKE_CASE__ = None
def __init__( self : Tuple , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : int=None , lowerCAmelCase : str=None , lowerCAmelCase : List[Any]="<unk>" , lowerCAmelCase : int="<s>" , lowerCAmelCase : Dict="</s>" , lowerCAmelCase : List[str]="<pad>" , lowerCAmelCase : int=False , lowerCAmelCase : Optional[int]=False , **lowerCAmelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
lowerCAmelCase , lowerCAmelCase , tokenizer_file=lowerCAmelCase , unk_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , pad_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCAmelCase ) != add_prefix_space:
UpperCAmelCase_ = getattr(lowerCAmelCase , pre_tok_state.pop("type" ) )
UpperCAmelCase_ = add_prefix_space
UpperCAmelCase_ = pre_tok_class(**lowerCAmelCase )
UpperCAmelCase_ = add_prefix_space
def __A ( self : int , *lowerCAmelCase : Any , **lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase_ = kwargs.get("is_split_into_words" , lowerCAmelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs." )
return super()._batch_encode_plus(*lowerCAmelCase , **lowerCAmelCase )
def __A ( self : Dict , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ = kwargs.get("is_split_into_words" , lowerCAmelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs." )
return super()._encode_plus(*lowerCAmelCase , **lowerCAmelCase )
def __A ( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None ):
'''simple docstring'''
UpperCAmelCase_ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
def __A ( self : str , lowerCAmelCase : "Conversation" ):
'''simple docstring'''
UpperCAmelCase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) + [self.eos_token_id] )
if len(lowerCAmelCase ) > self.model_max_length:
UpperCAmelCase_ = input_ids[-self.model_max_length :]
return input_ids
| 162
| 1
|
import unittest
import numpy as np
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case = None , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ : Dict = np.shape(__snake_case )
UpperCAmelCase_ : List[Any] = np.shape(__snake_case )
UpperCAmelCase_ : Tuple = np.shape(__snake_case )
if shape_a[0] != shape_b[0]:
UpperCAmelCase_ : Union[str, Any] = (
"Expected the same number of rows for A and B. "
F"""Instead found A of size {shape_a} and B of size {shape_b}"""
)
raise ValueError(__snake_case )
if shape_b[1] != shape_c[1]:
UpperCAmelCase_ : Optional[int] = (
"Expected the same number of columns for B and C. "
F"""Instead found B of size {shape_b} and C of size {shape_c}"""
)
raise ValueError(__snake_case )
UpperCAmelCase_ : Any = pseudo_inv
if a_inv is None:
try:
UpperCAmelCase_ : Optional[Any] = np.linalg.inv(__snake_case )
except np.linalg.LinAlgError:
raise ValueError(
"Input matrix A is not invertible. Cannot compute Schur complement." )
return mat_c - mat_b.T @ a_inv @ mat_b
class snake_case_ (unittest.TestCase ):
"""simple docstring"""
def A_ ( self):
"""simple docstring"""
UpperCAmelCase_ : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
UpperCAmelCase_ : Union[str, Any] = np.array([[0, 3], [3, 0], [2, 3]])
UpperCAmelCase_ : Dict = np.array([[2, 1], [6, 3]])
UpperCAmelCase_ : Any = schur_complement(__a ,__a ,__a)
UpperCAmelCase_ : Dict = np.block([[a, b], [b.T, c]])
UpperCAmelCase_ : Union[str, Any] = np.linalg.det(__a)
UpperCAmelCase_ : Dict = np.linalg.det(__a)
UpperCAmelCase_ : Any = np.linalg.det(__a)
self.assertAlmostEqual(__a ,det_a * det_s)
def A_ ( self):
"""simple docstring"""
UpperCAmelCase_ : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
UpperCAmelCase_ : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]])
UpperCAmelCase_ : List[Any] = np.array([[2, 1], [6, 3]])
with self.assertRaises(__a):
schur_complement(__a ,__a ,__a)
def A_ ( self):
"""simple docstring"""
UpperCAmelCase_ : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
UpperCAmelCase_ : Tuple = np.array([[0, 3], [3, 0], [2, 3]])
UpperCAmelCase_ : Optional[Any] = np.array([[2, 1, 3], [6, 3, 5]])
with self.assertRaises(__a):
schur_complement(__a ,__a ,__a)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 712
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''',
}
class snake_case_ (lowercase__ , lowercase__ ):
"""simple docstring"""
_lowerCamelCase = """focalnet"""
def __init__( self ,lowercase=224 ,lowercase=4 ,lowercase=3 ,lowercase=96 ,lowercase=False ,lowercase=[192, 384, 768, 768] ,lowercase=[2, 2, 6, 2] ,lowercase=[2, 2, 2, 2] ,lowercase=[3, 3, 3, 3] ,lowercase="gelu" ,lowercase=4.0 ,lowercase=0.0 ,lowercase=0.1 ,lowercase=False ,lowercase=1E-4 ,lowercase=False ,lowercase=False ,lowercase=False ,lowercase=0.02 ,lowercase=1E-5 ,lowercase=32 ,lowercase=None ,lowercase=None ,**lowercase ,):
"""simple docstring"""
super().__init__(**lowercase)
UpperCAmelCase_ : int = image_size
UpperCAmelCase_ : Optional[Any] = patch_size
UpperCAmelCase_ : Dict = num_channels
UpperCAmelCase_ : Dict = embed_dim
UpperCAmelCase_ : Optional[int] = use_conv_embed
UpperCAmelCase_ : int = hidden_sizes
UpperCAmelCase_ : Optional[int] = depths
UpperCAmelCase_ : Optional[Any] = focal_levels
UpperCAmelCase_ : Tuple = focal_windows
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : Any = mlp_ratio
UpperCAmelCase_ : List[Any] = hidden_dropout_prob
UpperCAmelCase_ : Tuple = drop_path_rate
UpperCAmelCase_ : List[str] = use_layerscale
UpperCAmelCase_ : List[str] = layerscale_value
UpperCAmelCase_ : List[str] = use_post_layernorm
UpperCAmelCase_ : Dict = use_post_layernorm_in_modulation
UpperCAmelCase_ : int = normalize_modulator
UpperCAmelCase_ : Dict = initializer_range
UpperCAmelCase_ : str = layer_norm_eps
UpperCAmelCase_ : Union[str, Any] = encoder_stride
UpperCAmelCase_ : str = ["stem"] + [F"""stage{idx}""" for idx in range(1 ,len(self.depths) + 1)]
UpperCAmelCase_ , UpperCAmelCase_ : Any = get_aligned_output_features_output_indices(
out_features=lowercase ,out_indices=lowercase ,stage_names=self.stage_names)
| 455
| 0
|
from collections import defaultdict
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 1
_lowerCAmelCase : Dict = True
for v in tree[start]:
if v not in visited:
ret += dfs(_lowerCamelCase )
if ret % 2 == 0:
cuts.append(_lowerCamelCase )
return ret
def A ( ):
'''simple docstring'''
dfs(1 )
if __name__ == "__main__":
_snake_case, _snake_case = 10, 9
_snake_case = defaultdict(list)
_snake_case = {}
_snake_case = []
_snake_case = 0
_snake_case = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 500
|
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = [False] * len(_lowerCamelCase )
_lowerCAmelCase : str = []
queue.append(_lowerCamelCase )
_lowerCAmelCase : List[str] = True
while queue:
_lowerCAmelCase : Any = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_lowerCamelCase )
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : List[str] = u
return visited[t]
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = [-1] * (len(_lowerCamelCase ))
_lowerCAmelCase : Tuple = 0
while bfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = float("Inf" )
_lowerCAmelCase : Optional[Any] = sink
while s != source:
# Find the minimum value in select path
_lowerCAmelCase : Optional[int] = min(_lowerCamelCase , graph[parent[s]][s] )
_lowerCAmelCase : List[Any] = parent[s]
max_flow += path_flow
_lowerCAmelCase : Optional[Any] = sink
while v != source:
_lowerCAmelCase : Tuple = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_lowerCAmelCase : Optional[Any] = parent[v]
return max_flow
_snake_case = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_snake_case, _snake_case = 0, 5
print(ford_fulkerson(graph, source, sink))
| 500
| 1
|
"""simple docstring"""
from string import ascii_uppercase
_A : Dict = {char: i for i, char in enumerate(ascii_uppercase)}
_A : Dict = dict(enumerate(ascii_uppercase))
def __magic_name__ ( __snake_case : str , __snake_case : str ) -> str:
lowercase : Union[str, Any] = len(__snake_case )
lowercase : Union[str, Any] = 0
while True:
if x == i:
lowercase : Tuple = 0
if len(__snake_case ) == len(__snake_case ):
break
key += key[i]
i += 1
return key
def __magic_name__ ( __snake_case : str , __snake_case : str ) -> str:
lowercase : Optional[Any] = ""
lowercase : Dict = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
lowercase : Optional[Any] = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def __magic_name__ ( __snake_case : str , __snake_case : str ) -> str:
lowercase : Dict = ""
lowercase : int = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
lowercase : Any = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def __magic_name__ ( ) -> None:
lowercase : Any = "THE GERMAN ATTACK"
lowercase : Any = "SECRET"
lowercase : Dict = generate_key(__snake_case , __snake_case )
lowercase : int = cipher_text(__snake_case , __snake_case )
print(f"""Encrypted Text = {s}""" )
print(f"""Original Text = {original_text(__snake_case , __snake_case )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 706
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_A : int = logging.get_logger(__name__)
class a__ ( a_ ):
def __init__( self , *_a , **_a ):
warnings.warn(
"The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use GLPNImageProcessor instead." , _a , )
super().__init__(*_a , **_a )
| 518
| 0
|
'''simple docstring'''
import random
def __lowercase ( __lowercase , __lowercase , __lowercase ) -> Union[str, Any]:
'''simple docstring'''
_A = a[left_index]
_A = left_index + 1
for j in range(left_index + 1 , __lowercase ):
if a[j] < pivot:
_A , _A = a[i], a[j]
i += 1
_A , _A = a[i - 1], a[left_index]
return i - 1
def __lowercase ( __lowercase , __lowercase , __lowercase ) -> Optional[int]:
'''simple docstring'''
if left < right:
_A = random.randint(__lowercase , right - 1 )
_A , _A = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
_A = partition(__lowercase , __lowercase , __lowercase )
quick_sort_random(
__lowercase , __lowercase , __lowercase ) # recursive quicksort to the left of the pivot point
quick_sort_random(
__lowercase , pivot_index + 1 , __lowercase ) # recursive quicksort to the right of the pivot point
def __lowercase ( ) -> List[str]:
'''simple docstring'''
_A = input("Enter numbers separated by a comma:\n" ).strip()
_A = [int(__lowercase ) for item in user_input.split("," )]
quick_sort_random(__lowercase , 0 , len(__lowercase ) )
print(__lowercase )
if __name__ == "__main__":
main()
| 330
|
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE__:List[str] = logging.getLogger(__name__)
def _lowerCamelCase( ):
__a = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=a , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=a , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=a , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=a , default=1_0_0_0 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=a , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=a , type=a , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=a , default=5_1_2 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=a , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
__a = parser.parse_args()
return args
def _lowerCamelCase( a ):
def fn(a ):
return tokenizer(examples["text"] )
return fn
def _lowerCamelCase( a ):
__a = []
for i in range(len(tokenized_data["input_ids"] ) ):
__a = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
__a = tf.train.Features(feature=a )
__a = tf.train.Example(features=a )
__a = example.SerializeToString()
records.append(a )
return records
def _lowerCamelCase( a ):
__a = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
__a = min(len(a ) , args.limit )
__a = dataset.select(range(a ) )
print(F"Limiting the dataset to {args.limit} entries." )
__a = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
__a = os.path.join(args.output_dir , args.split )
if not os.path.exists(a ):
os.makedirs(a )
else:
__a = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
__a = tokenize_function(a )
__a = dataset.map(a , batched=a , num_proc=4 , remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(a ):
# Concatenate all texts.
__a = {k: sum(examples[k] , [] ) for k in examples.keys()}
__a = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
__a = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
__a = {
k: [t[i : i + args.max_length] for i in range(0 , a , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
__a = dataset_tokenized.map(a , batched=a , batch_size=1_0_0_0 , num_proc=4 )
__a = 0
__a = 0
for shard in range(0 , len(a ) , args.shard_size ):
__a = grouped_dataset[shard : shard + args.shard_size]
__a = len(dataset_snapshot["input_ids"] )
__a = os.path.join(a , F"dataset-{shard_count}-{records_containing}.tfrecord" )
__a = get_serialized_examples(a )
with tf.io.TFRecordWriter(a ) as out_file:
for i in range(len(a ) ):
__a = serialized_examples[i]
out_file.write(a )
print("Wrote file {} containing {} records".format(a , a ) )
shard_count += 1
total_records += records_containing
with open(F"split-{args.split}-records-count.txt" , "w" ) as f:
print(F"Total {args.split} records: {total_records}" , file=a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:List[Any] = parse_args()
main(args)
| 528
| 0
|
"""simple docstring"""
from math import pow
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ):
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
A = int(pow(UpperCamelCase , UpperCamelCase ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
A, A = backtrack(
UpperCamelCase , UpperCamelCase , current_number + 1 , UpperCamelCase , UpperCamelCase )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
A, A = backtrack(
UpperCamelCase , UpperCamelCase , current_number + 1 , UpperCamelCase , UpperCamelCase )
return current_sum, solutions_count
def A__ ( UpperCamelCase , UpperCamelCase ):
if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10):
raise ValueError(
"Invalid input\n"
"needed_sum must be between 1 and 1000, power between 2 and 10." )
return backtrack(UpperCamelCase , UpperCamelCase , 1 , 0 , 0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 524
|
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self :List[Any] ):
A = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
A = AutoTokenizer.from_pretrained("google/mt5-small" )
A = tokenizer("Hello there" , return_tensors="np" ).input_ids
A = tokenizer("Hi I am" , return_tensors="np" ).input_ids
A = shift_tokens_right(__UpperCamelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
A = model(__UpperCamelCase , decoder_input_ids=__UpperCamelCase ).logits
A = optax.softmax_cross_entropy(__UpperCamelCase , onehot(__UpperCamelCase , logits.shape[-1] ) ).mean()
A = -(labels.shape[-1] * loss.item())
A = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 524
| 1
|
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if len(__UpperCamelCase ) <= 1 or n <= 1:
return
insert_next(__UpperCamelCase , n - 1 )
rec_insertion_sort(__UpperCamelCase , n - 1 )
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if index >= len(__UpperCamelCase ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
UpperCAmelCase__ , UpperCAmelCase__ : Any = (
collection[index],
collection[index - 1],
)
insert_next(__UpperCamelCase , index + 1 )
if __name__ == "__main__":
__UpperCAmelCase = input('Enter integers separated by spaces: ')
__UpperCAmelCase = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 65
|
"""simple docstring"""
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_UpperCamelCase = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def _A( lowerCAmelCase ):
A__ : Any = ["""layers""", """blocks"""]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase , lowerCAmelCase )
_UpperCamelCase = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def _A( lowerCAmelCase ):
A__ : str = list(s_dict.keys() )
for key in keys:
A__ : Optional[Any] = key
for k, v in WHISPER_MAPPING.items():
if k in key:
A__ : Optional[Any] = new_key.replace(lowerCAmelCase , lowerCAmelCase )
print(F'''{key} -> {new_key}''' )
A__ : Any = s_dict.pop(lowerCAmelCase )
return s_dict
def _A( lowerCAmelCase ):
A__ , A__ : Union[str, Any] = emb.weight.shape
A__ : Optional[Any] = nn.Linear(lowerCAmelCase , lowerCAmelCase , bias=lowerCAmelCase )
A__ : int = emb.weight.data
return lin_layer
def _A( lowerCAmelCase , lowerCAmelCase ):
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
A__ : Union[str, Any] = os.path.basename(lowerCAmelCase )
A__ : str = url.split("""/""" )[-2]
A__ : Union[str, Any] = os.path.join(lowerCAmelCase , lowerCAmelCase )
if os.path.exists(lowerCAmelCase ) and not os.path.isfile(lowerCAmelCase ):
raise RuntimeError(F'''{download_target} exists and is not a regular file''' )
if os.path.isfile(lowerCAmelCase ):
A__ : Tuple = open(lowerCAmelCase , """rb""" ).read()
if hashlib.shaaaa(lowerCAmelCase ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(F'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )
with urllib.request.urlopen(lowerCAmelCase ) as source, open(lowerCAmelCase , """wb""" ) as output:
with tqdm(
total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=lowerCAmelCase , unit_divisor=1024 ) as loop:
while True:
A__ : Dict = source.read(8192 )
if not buffer:
break
output.write(lowerCAmelCase )
loop.update(len(lowerCAmelCase ) )
A__ : List[str] = open(lowerCAmelCase , """rb""" ).read()
if hashlib.shaaaa(lowerCAmelCase ).hexdigest() != expected_shaaaa:
raise RuntimeError(
"""Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""" )
return model_bytes
def _A( lowerCAmelCase , lowerCAmelCase ):
if ".pt" not in checkpoint_path:
A__ : str = _download(_MODELS[checkpoint_path] )
else:
A__ : Optional[int] = torch.load(lowerCAmelCase , map_location="""cpu""" )
A__ : List[str] = original_checkpoint["""dims"""]
A__ : List[Any] = original_checkpoint["""model_state_dict"""]
A__ : Dict = state_dict["""decoder.token_embedding.weight"""]
remove_ignore_keys_(lowerCAmelCase )
rename_keys(lowerCAmelCase )
A__ : Any = True
A__ : List[str] = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
A__ : str = WhisperConfig(
vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=lowerCAmelCase , decoder_ffn_dim=lowerCAmelCase , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_state"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
A__ : int = WhisperForConditionalGeneration(lowerCAmelCase )
A__ , A__ : List[Any] = model.model.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase )
if len(lowerCAmelCase ) > 0 and not set(lowerCAmelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"""Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
F''' but all the following weights are missing {missing}''' )
if tie_embeds:
A__ : Tuple = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
A__ : Any = proj_out_weights
model.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
_UpperCamelCase = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 363
| 0
|
'''simple docstring'''
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Tuple = len(__a )
for i in range(length - 1 ):
_a : List[Any] = i
for k in range(i + 1 , __a ):
if collection[k] < collection[least]:
_a : Union[str, Any] = k
if least != i:
_a, _a : Tuple = (collection[i], collection[least])
return collection
if __name__ == "__main__":
__lowerCAmelCase = input("""Enter numbers separated by a comma:\n""").strip()
__lowerCAmelCase = [int(item) for item in user_input.split(""",""")]
print(selection_sort(unsorted))
| 319
|
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Optional[Any] ,_a : Union[str, Any] ,_a : Union[str, Any]=13 ,_a : Any=32 ,_a : Optional[Any]=2 ,_a : Any=3 ,_a : str=16 ,_a : Tuple=[1, 2, 1] ,_a : Tuple=[2, 2, 4] ,_a : Any=2 ,_a : Optional[int]=2.0 ,_a : List[Any]=True ,_a : str=0.0 ,_a : Tuple=0.0 ,_a : Optional[Any]=0.1 ,_a : Dict="gelu" ,_a : Union[str, Any]=False ,_a : Any=True ,_a : Any=0.02 ,_a : List[Any]=1E-5 ,_a : Any=True ,_a : List[str]=None ,_a : str=True ,_a : Optional[int]=10 ,_a : List[str]=8 ,):
'''simple docstring'''
_a : Dict = parent
_a : str = batch_size
_a : Optional[int] = image_size
_a : str = patch_size
_a : Optional[int] = num_channels
_a : List[Any] = embed_dim
_a : Optional[Any] = depths
_a : Optional[int] = num_heads
_a : str = window_size
_a : Any = mlp_ratio
_a : Optional[Any] = qkv_bias
_a : Optional[Any] = hidden_dropout_prob
_a : Union[str, Any] = attention_probs_dropout_prob
_a : Union[str, Any] = drop_path_rate
_a : Union[str, Any] = hidden_act
_a : Union[str, Any] = use_absolute_embeddings
_a : str = patch_norm
_a : Tuple = layer_norm_eps
_a : List[Any] = initializer_range
_a : Optional[int] = is_training
_a : str = scope
_a : List[str] = use_labels
_a : int = type_sequence_label_size
_a : List[str] = encoder_stride
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : List[Any] = None
if self.use_labels:
_a : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_a : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : str ):
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def __lowercase ( self : str ,_a : Tuple ,_a : Tuple ,_a : Any ):
'''simple docstring'''
_a : List[Any] = SwinvaModel(config=_a )
model.to(_a )
model.eval()
_a : Optional[Any] = model(_a )
_a : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_a : Any = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def __lowercase ( self : Union[str, Any] ,_a : List[str] ,_a : Tuple ,_a : List[Any] ):
'''simple docstring'''
_a : int = SwinvaForMaskedImageModeling(config=_a )
model.to(_a )
model.eval()
_a : List[str] = model(_a )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_a : List[Any] = 1
_a : str = SwinvaForMaskedImageModeling(_a )
model.to(_a )
model.eval()
_a : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a : Optional[Any] = model(_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def __lowercase ( self : Any ,_a : List[Any] ,_a : Optional[int] ,_a : List[Any] ):
'''simple docstring'''
_a : Optional[Any] = self.type_sequence_label_size
_a : str = SwinvaForImageClassification(_a )
model.to(_a )
model.eval()
_a : List[str] = model(_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : Any = self.prepare_config_and_inputs()
_a, _a, _a : int = config_and_inputs
_a : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__UpperCAmelCase : Any = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : Dict = False
__UpperCAmelCase : Any = False
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : Any = False
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[int] = SwinvaModelTester(self )
_a : Any = ConfigTester(self ,config_class=_a ,embed_dim=37 )
def __lowercase ( self : str ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def __lowercase ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def __lowercase ( self : Tuple ):
'''simple docstring'''
pass
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a, _a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : str = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_a : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a ,nn.Linear ) )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a, _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : str = model_class(_a )
_a : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Tuple = [*signature.parameters.keys()]
_a : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a, _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_a : int = True
for model_class in self.all_model_classes:
_a : Optional[Any] = True
_a : List[str] = False
_a : Tuple = True
_a : Dict = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Tuple = model(**self._prepare_for_class(_a ,_a ) )
_a : Any = outputs.attentions
_a : Optional[int] = len(self.model_tester.depths )
self.assertEqual(len(_a ) ,_a )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_a : Optional[int] = True
_a : Dict = config.window_size**2
_a : Optional[int] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Union[str, Any] = model(**self._prepare_for_class(_a ,_a ) )
_a : Union[str, Any] = outputs.attentions
self.assertEqual(len(_a ) ,_a )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
_a : str = len(_a )
# Check attention is always last and order is fine
_a : int = True
_a : int = True
_a : Union[str, Any] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Optional[int] = model(**self._prepare_for_class(_a ,_a ) )
if hasattr(self.model_tester ,'num_hidden_states_types' ):
_a : Any = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
_a : Optional[Any] = 2
self.assertEqual(out_len + added_hidden_states ,len(_a ) )
_a : str = outputs.attentions
self.assertEqual(len(_a ) ,_a )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def __lowercase ( self : Optional[int] ,_a : str ,_a : Union[str, Any] ,_a : Any ,_a : Union[str, Any] ):
'''simple docstring'''
_a : int = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Optional[int] = model(**self._prepare_for_class(_a ,_a ) )
_a : Any = outputs.hidden_states
_a : str = getattr(
self.model_tester ,'expected_num_hidden_layers' ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_a ) ,_a )
# Swinv2 has a different seq_length
_a : List[str] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_a : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
_a : Optional[int] = outputs.reshaped_hidden_states
self.assertEqual(len(_a ) ,_a )
_a, _a, _a, _a : Optional[int] = reshaped_hidden_states[0].shape
_a : str = (
reshaped_hidden_states[0].view(_a ,_a ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def __lowercase ( self : Dict ):
'''simple docstring'''
_a, _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_a : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_a : List[Any] = True
self.check_hidden_states_output(_a ,_a ,_a ,_a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : Any = True
self.check_hidden_states_output(_a ,_a ,_a ,_a )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a, _a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_a : List[Any] = 3
_a : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_a : Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_a : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_a : Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_a : int = True
self.check_hidden_states_output(_a ,_a ,_a ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : Optional[Any] = True
self.check_hidden_states_output(_a ,_a ,_a ,(padded_height, padded_width) )
def __lowercase ( self : str ):
'''simple docstring'''
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_a )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : str = SwinvaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a, _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
_a : int = _config_zero_init(_a )
for model_class in self.all_model_classes:
_a : int = model_class(config=_a )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@require_vision
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Tuple ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[Any] = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
_a )
_a : Union[str, Any] = self.default_image_processor
_a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_a : Dict = image_processor(images=_a ,return_tensors='pt' ).to(_a )
# forward pass
with torch.no_grad():
_a : Dict = model(**_a )
# verify the logits
_a : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_a )
_a : int = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_a ,atol=1E-4 ) )
| 319
| 1
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ : Tuple = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowercase__ : Dict = 25_00_04
lowercase__ : List[Any] = 25_00_20
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = MBartTokenizer
_snake_case : Tuple = MBartTokenizerFast
_snake_case : Union[str, Any] = True
_snake_case : Tuple = True
def snake_case__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
_UpperCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_UpperCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_UpperCamelCase = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def snake_case__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_UpperCamelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
_UpperCamelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=True
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=False
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
_snake_case : Tuple = 'facebook/mbart-large-en-ro'
_snake_case : List[Any] = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
_snake_case : Any = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
_snake_case : Any = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
@classmethod
def snake_case__ ( cls : str ) -> int:
'''simple docstring'''
_UpperCamelCase = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
_UpperCamelCase = 1
return cls
def snake_case__ ( self : Any ) -> str:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250020 )
def snake_case__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
def snake_case__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids )
_UpperCamelCase = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
_UpperCamelCase = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
_UpperCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ )
def snake_case__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , lowerCAmelCase__ )
_UpperCamelCase = 10
_UpperCamelCase = self.tokenizer(lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
def snake_case__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250026, 250001] )
def snake_case__ ( self : int ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = MBartTokenizer.from_pretrained(lowerCAmelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase__ )
@require_torch
def snake_case__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors='''pt''' )
_UpperCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def snake_case__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
_UpperCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
_UpperCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def snake_case__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.tokenizer(self.src_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=3 , return_tensors='''pt''' )
_UpperCamelCase = self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=10 , return_tensors='''pt''' )
_UpperCamelCase = targets['''input_ids''']
_UpperCamelCase = shift_tokens_right(lowerCAmelCase__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def snake_case__ ( self : str ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[62, 3034, 2, 250004]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 250001,
} , )
| 98
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
UpperCAmelCase__ : str = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase__ : Tuple = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase__ : Optional[int] = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase__ : int = model(snake_case__ )["last_hidden_state"].detach()
self.assertEqual(output.shape , snake_case__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , snake_case__ , atol=1e-3 ) )
@slow
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
UpperCAmelCase__ : Tuple = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase__ : Union[str, Any] = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase__ : Optional[Any] = model(snake_case__ )["last_hidden_state"].detach()
self.assertEqual(output.shape , snake_case__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , snake_case__ , atol=1e-3 ) )
| 438
| 0
|
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
SCREAMING_SNAKE_CASE__ = 6
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 19_01
SCREAMING_SNAKE_CASE__ = 0
while year < 20_01:
day += 7
if (year % 4 == 0 and year % 1_00 != 0) or (year % 4_00 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
SCREAMING_SNAKE_CASE__ = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
SCREAMING_SNAKE_CASE__ = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
SCREAMING_SNAKE_CASE__ = day - days_per_month[month - 2]
if month > 12:
year += 1
SCREAMING_SNAKE_CASE__ = 1
if year < 20_01 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
| 718
|
import random
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = a[left_index]
SCREAMING_SNAKE_CASE__ = left_index + 1
for j in range(left_index + 1 , _A ):
if a[j] < pivot:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = a[i], a[j]
i += 1
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = a[i - 1], a[left_index]
return i - 1
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
if left < right:
SCREAMING_SNAKE_CASE__ = random.randint(_A , right - 1 )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
SCREAMING_SNAKE_CASE__ = partition(_A , _A , _A )
quick_sort_random(
_A , _A , _A ) # recursive quicksort to the left of the pivot point
quick_sort_random(
_A , pivot_index + 1 , _A ) # recursive quicksort to the right of the pivot point
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = input('''Enter numbers separated by a comma:\n''' ).strip()
SCREAMING_SNAKE_CASE__ = [int(_A ) for item in user_input.split(''',''' )]
quick_sort_random(_A , 0 , len(_A ) )
print(_A )
if __name__ == "__main__":
main()
| 472
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = LEDTokenizer
SCREAMING_SNAKE_CASE_ : Any = LEDTokenizerFast
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
def __A ( self ) -> Dict:
super().setUp()
SCREAMING_SNAKE_CASE = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
SCREAMING_SNAKE_CASE = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
SCREAMING_SNAKE_CASE = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
SCREAMING_SNAKE_CASE = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCAmelCase__ ) )
def __A ( self , **lowerCAmelCase__ ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __A ( self , **lowerCAmelCase__ ) -> Optional[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __A ( self , lowerCAmelCase__ ) -> Optional[int]:
return "lower newer", "lower newer"
@cached_property
def __A ( self ) -> Any:
return LEDTokenizer.from_pretrained('allenai/led-base-16384' )
@cached_property
def __A ( self ) -> List[str]:
return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
@require_torch
def __A ( self ) -> Tuple:
SCREAMING_SNAKE_CASE = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
SCREAMING_SNAKE_CASE = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase__ , max_length=len(lowerCAmelCase__ ) , padding=lowerCAmelCase__ , return_tensors='pt' )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@require_torch
def __A ( self ) -> Tuple:
SCREAMING_SNAKE_CASE = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors='pt' )
self.assertIn('input_ids' , lowerCAmelCase__ )
self.assertIn('attention_mask' , lowerCAmelCase__ )
self.assertNotIn('labels' , lowerCAmelCase__ )
self.assertNotIn('decoder_attention_mask' , lowerCAmelCase__ )
@require_torch
def __A ( self ) -> Any:
SCREAMING_SNAKE_CASE = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE = tokenizer(text_target=lowerCAmelCase__ , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def __A ( self ) -> Tuple:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE = tokenizer(
['I am a small frog' * 1_024, 'I am a small frog'] , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='pt' )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(batch.input_ids.shape , (2, 5_122) )
@require_torch
def __A ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = ['A long paragraph for summarization.']
SCREAMING_SNAKE_CASE = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE = tokenizer(text_target=lowerCAmelCase__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE = inputs['input_ids']
SCREAMING_SNAKE_CASE = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __A ( self ) -> str:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE = ['Summary of the text.', 'Another summary.']
SCREAMING_SNAKE_CASE = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [[0] * len(lowerCAmelCase__ ) for x in encoded_output['input_ids']]
SCREAMING_SNAKE_CASE = tokenizer.pad(lowerCAmelCase__ )
self.assertSequenceEqual(outputs['global_attention_mask'] , lowerCAmelCase__ )
def __A ( self ) -> int:
pass
def __A ( self ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = 'A, <mask> AllenNLP sentence.'
SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
lowerCAmelCase__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 247
|
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__UpperCamelCase = logging.getLogger(__name__)
def lowercase (SCREAMING_SNAKE_CASE_ : int=2 , SCREAMING_SNAKE_CASE_ : Optional[int]=3 , SCREAMING_SNAKE_CASE_ : Optional[Any]=16 , SCREAMING_SNAKE_CASE_ : int = 10 , SCREAMING_SNAKE_CASE_ : int = 2 ) -> List[str]:
def get_dataset(SCREAMING_SNAKE_CASE_ : int ):
SCREAMING_SNAKE_CASE = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(SCREAMING_SNAKE_CASE_ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
SCREAMING_SNAKE_CASE = get_dataset(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = get_dataset(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = DataLoader(SCREAMING_SNAKE_CASE_ , shuffle=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , num_workers=4 )
SCREAMING_SNAKE_CASE = DataLoader(SCREAMING_SNAKE_CASE_ , shuffle=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def lowercase (SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any]=None ) -> Any:
SCREAMING_SNAKE_CASE = []
for epoch in range(SCREAMING_SNAKE_CASE_ ):
# Train quickly
model.train()
for batch in dataloader:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = batch
SCREAMING_SNAKE_CASE = model(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = torch.nn.functional.mse_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.backward(SCREAMING_SNAKE_CASE_ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self ) -> Dict:
super().__init__()
SCREAMING_SNAKE_CASE = nn.Parameter(torch.randn(1 ) )
SCREAMING_SNAKE_CASE = nn.Parameter(torch.randn(1 ) )
def __A ( self , lowerCAmelCase__ ) -> Dict:
return x * self.a + self.b
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(total_limit=1 , project_dir=lowerCAmelCase__ , automatic_checkpoint_naming=lowerCAmelCase__ )
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __A ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = dummy_dataloaders()
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save initial
SCREAMING_SNAKE_CASE = os.path.join(lowerCAmelCase__ , 'initial' )
accelerator.save_state(lowerCAmelCase__ )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
SCREAMING_SNAKE_CASE = train(3 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = Accelerator()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
accelerator.load_state(lowerCAmelCase__ )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = train(2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save everything
SCREAMING_SNAKE_CASE = os.path.join(lowerCAmelCase__ , 'checkpoint' )
accelerator.save_state(lowerCAmelCase__ )
# Load everything back in and make sure all states work
accelerator.load_state(lowerCAmelCase__ )
test_rands += train(1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __A ( self ) -> str:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=lowerCAmelCase__ )
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_dir=lowerCAmelCase__ , project_config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save initial
accelerator.save_state()
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
SCREAMING_SNAKE_CASE = train(3 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = Accelerator(project_dir=lowerCAmelCase__ , project_config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
accelerator.load_state(os.path.join(lowerCAmelCase__ , 'checkpoints' , 'checkpoint_0' ) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = train(2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCAmelCase__ , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE = torch.tensor([1, 2, 3] )
SCREAMING_SNAKE_CASE = torch.tensor([2, 3, 4] )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(net.parameters() )
SCREAMING_SNAKE_CASE = Accelerator()
with self.assertRaises(lowerCAmelCase__ ) as ve:
accelerator.register_for_checkpointing(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def __A ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE = torch.optim.lr_scheduler.StepLR(lowerCAmelCase__ , step_size=1 , gamma=0.99 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=lowerCAmelCase__ )
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_dir=lowerCAmelCase__ , project_config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save initial
accelerator.save_state()
SCREAMING_SNAKE_CASE = scheduler.state_dict()
train(3 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotEqual(lowerCAmelCase__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCAmelCase__ , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(lowerCAmelCase__ , scheduler.state_dict() )
def __A ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=lowerCAmelCase__ , total_limit=2 )
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_dir=lowerCAmelCase__ , project_config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = accelerator.prepare(lowerCAmelCase__ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(lowerCAmelCase__ , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def __A ( self ) -> Dict:
SCREAMING_SNAKE_CASE = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(lowerCAmelCase__ , env=os.environ.copy() )
if __name__ == "__main__":
__UpperCamelCase = '''/tmp/accelerate/state_checkpointing'''
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters(), lr=1E-3)
__UpperCamelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
__UpperCamelCase,__UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__UpperCamelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='''no''')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__UpperCamelCase,__UpperCamelCase,__UpperCamelCase,__UpperCamelCase,__UpperCamelCase = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__UpperCamelCase,__UpperCamelCase = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__UpperCamelCase = group['''params'''][0].device
break
assert param_device.type == accelerator.device.type
__UpperCamelCase = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''cpu''')
for group in optimizer.param_groups:
__UpperCamelCase = group['''params'''][0].device
break
assert (
param_device.type == torch.device('''cpu''').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''on_device''')
for group in optimizer.param_groups:
__UpperCamelCase = group['''params'''][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='''Unsupported optimizer map location passed'''):
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''invalid''')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 247
| 1
|
from __future__ import annotations
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> int:
if len(SCREAMING_SNAKE_CASE__) < k or k < 0:
raise ValueError("""Invalid Input""")
__snake_case: int = sum(array[:k])
for i in range(len(SCREAMING_SNAKE_CASE__) - k):
__snake_case: Optional[int] = current_sum - array[i] + array[i + k]
__snake_case: int = max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
__UpperCAmelCase : Dict = [randint(-1_000, 1_000) for i in range(100)]
__UpperCAmelCase : Tuple = randint(0, 110)
print(f'The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}')
| 155
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_50, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
] )
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Optional[Any] ):
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=A , )
assert hasattr(self , """env""" )
def UpperCAmelCase__ ( self : int , A : List[Any] ):
__snake_case: Optional[int] = f'''{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}'''
# distributed data settings
__snake_case: Optional[int] = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=A , instance_count=A , instance_type=self.instance_type , debugger_hook_config=A , hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=A , py_version="""py36""" , )
def UpperCAmelCase__ ( self : List[Any] , A : int ):
TrainingJobAnalytics(A ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(2,)] )
def UpperCAmelCase__ ( self : str , A : Any ):
# create estimator
__snake_case: str = self.create_estimator(A )
# run training
estimator.fit()
# result dataframe
__snake_case: Optional[int] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__snake_case: Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
__snake_case: List[str] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__snake_case: Tuple = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , A )
| 155
| 1
|
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__lowercase : List[Any] =Lock()
def a__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 1_0 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(lowercase__ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
UpperCAmelCase_ =rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
UpperCAmelCase_ =min(lowercase__ , lowercase__ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(lowercase__ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
UpperCAmelCase_ =lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
UpperCAmelCase_ =max(lowercase__ , lowercase__ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(lowercase__ )
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =[]
UpperCAmelCase_ =[]
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
UpperCAmelCase_ =Pipe()
UpperCAmelCase_ =Pipe()
process_array_.append(
Process(
target=lowercase__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
UpperCAmelCase_ =temp_rs
UpperCAmelCase_ =temp_rr
for i in range(1 , len(lowercase__ ) - 1 ):
UpperCAmelCase_ =Pipe()
UpperCAmelCase_ =Pipe()
process_array_.append(
Process(
target=lowercase__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
UpperCAmelCase_ =temp_rs
UpperCAmelCase_ =temp_rr
process_array_.append(
Process(
target=lowercase__ , args=(
len(lowercase__ ) - 1,
arr[len(lowercase__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(lowercase__ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(lowercase__ ) ):
UpperCAmelCase_ =result_pipe[p][0].recv()
process_array_[p].join()
return arr
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =list(range(1_0 , 0 , -1 ) )
print("Initial List" )
print(*lowercase__ )
UpperCAmelCase_ =odd_even_transposition(lowercase__ )
print("Sorted List\n" )
print(*lowercase__ )
if __name__ == "__main__":
main()
| 54
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__magic_name__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , *a_ , **a_ ):
warnings.warn(
"The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use BeitImageProcessor instead." , a_ , )
super().__init__(*a_ , **a_ )
| 250
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 721
|
'''simple docstring'''
from math import sqrt
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE = 100_0000 ):
SCREAMING_SNAKE_CASE_ :int = 0
SCREAMING_SNAKE_CASE_ :int = 0
SCREAMING_SNAKE_CASE_ :int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(SCREAMING_SNAKE_CASE , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
| 233
| 0
|
def lowercase__ ( A_: int = 1000 ) -> int:
"""simple docstring"""
__UpperCAmelCase =3
__UpperCAmelCase =0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 15 == 0:
result -= a
a += 1
return result
if __name__ == "__main__":
print(F"""{solution() = }""")
| 68
|
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCamelCase_ :
def __init__( self : Optional[int] , _A : Optional[Any] , _A : Tuple=2 , _A : Tuple=3 , _A : Optional[Any]=4 , _A : List[Any]=2 , _A : List[Any]=7 , _A : int=True , _A : Dict=True , _A : int=True , _A : Dict=True , _A : Tuple=99 , _A : Union[str, Any]=36 , _A : int=2 , _A : List[str]=4 , _A : int=37 , _A : List[Any]="gelu" , _A : str=0.1 , _A : str=0.1 , _A : Tuple=512 , _A : Dict=16 , _A : Tuple=2 , _A : Union[str, Any]=0.0_2 , _A : Any=6 , _A : Union[str, Any]=6 , _A : str=3 , _A : str=4 , _A : Tuple=None , _A : int=1_000 , ):
'''simple docstring'''
UpperCAmelCase__ : int = parent
UpperCAmelCase__ : Optional[int] = batch_size
UpperCAmelCase__ : str = num_channels
UpperCAmelCase__ : str = image_size
UpperCAmelCase__ : List[str] = patch_size
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : List[str] = use_input_mask
UpperCAmelCase__ : Tuple = use_token_type_ids
UpperCAmelCase__ : str = use_labels
UpperCAmelCase__ : int = vocab_size
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Optional[int] = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : int = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : List[str] = max_position_embeddings
UpperCAmelCase__ : Tuple = type_vocab_size
UpperCAmelCase__ : Any = type_sequence_label_size
UpperCAmelCase__ : List[str] = initializer_range
UpperCAmelCase__ : List[str] = coordinate_size
UpperCAmelCase__ : Tuple = shape_size
UpperCAmelCase__ : Optional[int] = num_labels
UpperCAmelCase__ : Optional[Any] = num_choices
UpperCAmelCase__ : Union[str, Any] = scope
UpperCAmelCase__ : Optional[Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCAmelCase__ : str = text_seq_length
UpperCAmelCase__ : Tuple = (image_size // patch_size) ** 2 + 1
UpperCAmelCase__ : Tuple = self.text_seq_length + self.image_seq_length
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCAmelCase__ : int = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase__ : str = bbox[i, j, 3]
UpperCAmelCase__ : Dict = bbox[i, j, 1]
UpperCAmelCase__ : str = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase__ : Optional[int] = bbox[i, j, 2]
UpperCAmelCase__ : Any = bbox[i, j, 0]
UpperCAmelCase__ : List[Any] = tmp_coordinate
UpperCAmelCase__ : str = tf.constant(_A )
UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Any = None
if self.use_input_mask:
UpperCAmelCase__ : Any = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCAmelCase__ : Any = None
if self.use_token_type_ids:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : List[str] = None
if self.use_labels:
UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCAmelCase__ : Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowercase_ ( self : Union[str, Any] , _A : int , _A : str , _A : Optional[int] , _A : Optional[int] , _A : List[str] , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = TFLayoutLMvaModel(config=_A )
# text + image
UpperCAmelCase__ : Tuple = model(_A , pixel_values=_A , training=_A )
UpperCAmelCase__ : Tuple = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , training=_A , )
UpperCAmelCase__ : Optional[Any] = model(_A , bbox=_A , pixel_values=_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCAmelCase__ : Any = model(_A , training=_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCAmelCase__ : str = model({'''pixel_values''': pixel_values} , training=_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowercase_ ( self : Union[str, Any] , _A : Optional[int] , _A : Optional[Any] , _A : Dict , _A : List[Any] , _A : List[Any] , _A : Any , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.num_labels
UpperCAmelCase__ : int = TFLayoutLMvaForSequenceClassification(config=_A )
UpperCAmelCase__ : Union[str, Any] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self : Dict , _A : List[Any] , _A : Any , _A : Dict , _A : str , _A : Optional[int] , _A : str , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.num_labels
UpperCAmelCase__ : Union[str, Any] = TFLayoutLMvaForTokenClassification(config=_A )
UpperCAmelCase__ : Optional[int] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowercase_ ( self : Dict , _A : Dict , _A : List[str] , _A : Union[str, Any] , _A : int , _A : Tuple , _A : Dict , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : str = 2
UpperCAmelCase__ : Dict = TFLayoutLMvaForQuestionAnswering(config=_A )
UpperCAmelCase__ : str = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , training=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : int = self.prepare_config_and_inputs()
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = config_and_inputs
UpperCAmelCase__ : List[Any] = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : str , _A : List[Any] , _A : Dict , _A : List[str] ):
'''simple docstring'''
return True
def lowercase_ ( self : Optional[Any] , _A : Tuple , _A : Any , _A : Dict=False ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = copy.deepcopy(_A )
if model_class in get_values(_A ):
UpperCAmelCase__ : Tuple = {
k: tf.tile(tf.expand_dims(_A , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(_A , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_A ):
UpperCAmelCase__ : Dict = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_A ):
UpperCAmelCase__ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_A ):
UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_A ):
UpperCAmelCase__ : int = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Any = TFLayoutLMvaModelTester(self )
UpperCAmelCase__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 )
def lowercase_ ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[Any] = model_class(_A )
if getattr(_A , '''hf_compute_loss''' , _A ):
# The number of elements in the loss should be the same as the number of elements in the label
UpperCAmelCase__ : Tuple = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : List[Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_A )[0]
]
UpperCAmelCase__ : Optional[Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
UpperCAmelCase__ : Any = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' )
UpperCAmelCase__ : List[Any] = model(_A , **_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' )
if "labels" in prepared_for_class:
UpperCAmelCase__ : Optional[Any] = prepared_for_class['''labels'''].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
UpperCAmelCase__ : Any = -100
UpperCAmelCase__ : Union[str, Any] = tf.convert_to_tensor(_A )
UpperCAmelCase__ : int = model(_A , **_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
UpperCAmelCase__ : Optional[int] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Dict = model(_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
UpperCAmelCase__ : Dict = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
# Get keys that were added with the _prepare_for_class function
UpperCAmelCase__ : Optional[int] = prepared_for_class.keys() - inputs_dict.keys()
UpperCAmelCase__ : int = inspect.signature(model.call ).parameters
UpperCAmelCase__ : Union[str, Any] = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
UpperCAmelCase__ : Dict = {0: '''input_ids'''}
for label_key in label_keys:
UpperCAmelCase__ : str = signature_names.index(_A )
UpperCAmelCase__ : List[Any] = label_key
UpperCAmelCase__ : Dict = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
UpperCAmelCase__ : Tuple = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
UpperCAmelCase__ : Any = prepared_for_class[value]
UpperCAmelCase__ : Tuple = tuple(_A )
# Send to model
UpperCAmelCase__ : Optional[Any] = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def lowercase_ ( self : int ):
'''simple docstring'''
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ : Union[str, Any] = type
self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
_A , _A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Any ):
'''simple docstring'''
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
_A , _A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
_A , _A , _A , _A , _A , _A , _A )
@slow
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[str] = TFLayoutLMvaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def a__ ( ) -> List[str]:
UpperCAmelCase__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowercase_ ( self : Dict ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=_A ) if is_vision_available() else None
@slow
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : str = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' )
UpperCAmelCase__ : Dict = self.default_image_processor
UpperCAmelCase__ : Any = prepare_img()
UpperCAmelCase__ : int = image_processor(images=_A , return_tensors='''tf''' ).pixel_values
UpperCAmelCase__ : str = tf.constant([[1, 2]] )
UpperCAmelCase__ : Optional[Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
UpperCAmelCase__ : int = model(input_ids=_A , bbox=_A , pixel_values=_A , training=_A )
# verify the logits
UpperCAmelCase__ : Optional[int] = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , _A )
UpperCAmelCase__ : Dict = tf.constant(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _A , atol=1e-4 ) )
| 75
| 0
|
from math import factorial, radians
def A ( _UpperCAmelCase : float , _UpperCAmelCase : int = 18 , _UpperCAmelCase : int = 10 ) -> float:
'''simple docstring'''
_UpperCAmelCase = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
_UpperCAmelCase = radians(_UpperCAmelCase )
_UpperCAmelCase = angle_in_radians
_UpperCAmelCase = 3
_UpperCAmelCase = -1
for _ in range(_UpperCAmelCase ):
result += (b * (angle_in_radians**a)) / factorial(_UpperCAmelCase )
_UpperCAmelCase = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
__import__("doctest").testmod()
| 639
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
| 1
|
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def A_ ( snake_case , snake_case , snake_case , snake_case , snake_case ):
# Load configuration defined in the metadata file
with open(snake_case ) as metadata_file:
SCREAMING_SNAKE_CASE:str = json.load(snake_case )
SCREAMING_SNAKE_CASE:List[str] = LukeConfig(use_entity_aware_attention=snake_case , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE:Tuple = torch.load(snake_case , map_location="cpu" )
# Load the entity vocab file
SCREAMING_SNAKE_CASE:Dict = load_entity_vocab(snake_case )
SCREAMING_SNAKE_CASE:Optional[int] = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE:Dict = AddedToken("<ent>" , lstrip=snake_case , rstrip=snake_case )
SCREAMING_SNAKE_CASE:List[Any] = AddedToken("<ent2>" , lstrip=snake_case , rstrip=snake_case )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(snake_case )
with open(os.path.join(snake_case , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(snake_case , snake_case )
SCREAMING_SNAKE_CASE:str = LukeTokenizer.from_pretrained(snake_case )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE:Optional[Any] = state_dict["embeddings.word_embeddings.weight"]
SCREAMING_SNAKE_CASE:Union[str, Any] = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
SCREAMING_SNAKE_CASE:Union[str, Any] = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
SCREAMING_SNAKE_CASE:Tuple = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE:Union[str, Any] = F'''encoder.layer.{layer_index}.attention.self.'''
SCREAMING_SNAKE_CASE:List[Any] = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE:List[Any] = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE:List[str] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE:str = state_dict["entity_embeddings.entity_embeddings.weight"]
SCREAMING_SNAKE_CASE:int = entity_emb[entity_vocab["[MASK]"]]
SCREAMING_SNAKE_CASE:str = LukeModel(config=snake_case ).eval()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:int = model.load_state_dict(snake_case , strict=snake_case )
if not (len(snake_case ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F'''Missing keys {", ".join(snake_case )}. Expected only missing embeddings.position_ids''' )
if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
raise ValueError(
"Unexpected keys"
F''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}''' )
# Check outputs
SCREAMING_SNAKE_CASE:Optional[Any] = LukeTokenizer.from_pretrained(snake_case , task="entity_classification" )
SCREAMING_SNAKE_CASE:Tuple = (
"Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
" new world number one avoid a humiliating second- round exit at Wimbledon ."
)
SCREAMING_SNAKE_CASE:List[str] = (39, 42)
SCREAMING_SNAKE_CASE:int = tokenizer(snake_case , entity_spans=[span] , add_prefix_space=snake_case , return_tensors="pt" )
SCREAMING_SNAKE_CASE:Any = model(**snake_case )
# Verify word hidden states
if model_size == "large":
SCREAMING_SNAKE_CASE:List[str] = torch.Size((1, 42, 1024) )
SCREAMING_SNAKE_CASE:Optional[int] = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
SCREAMING_SNAKE_CASE:List[str] = torch.Size((1, 42, 768) )
SCREAMING_SNAKE_CASE:int = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
SCREAMING_SNAKE_CASE:Union[str, Any] = torch.Size((1, 1, 1024) )
SCREAMING_SNAKE_CASE:Optional[int] = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
SCREAMING_SNAKE_CASE:List[str] = torch.Size((1, 1, 768) )
SCREAMING_SNAKE_CASE:Any = torch.tensor([[0.1457, 0.1044, 0.0174]] )
if not (outputs.entity_last_hidden_state.shape != expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , snake_case , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(snake_case ) )
model.save_pretrained(snake_case )
def A_ ( snake_case ):
SCREAMING_SNAKE_CASE:Any = {}
with open(snake_case , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(snake_case ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Optional[Any] = line.rstrip().split("\t" )
SCREAMING_SNAKE_CASE:str = index
return entity_vocab
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
A_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 143
|
'''simple docstring'''
def A_ ( snake_case = 600851475143 ):
try:
SCREAMING_SNAKE_CASE:str = int(snake_case )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
SCREAMING_SNAKE_CASE:Optional[Any] = 1
SCREAMING_SNAKE_CASE:Optional[Any] = 2
while i * i <= n:
while n % i == 0:
SCREAMING_SNAKE_CASE:List[Any] = i
n //= i
i += 1
if n > 1:
SCREAMING_SNAKE_CASE:List[str] = n
return int(snake_case )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 143
| 1
|
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__A = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__A = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
__A = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def lowercase_ ( _lowerCamelCase: Any ) -> Dict:
'''simple docstring'''
def remove_articles(_lowerCamelCase: Union[str, Any] ):
__lowerCamelCase : Tuple = re.compile(r"\b(a|an|the)\b" , re.UNICODE )
return re.sub(_lowerCamelCase , " " , _lowerCamelCase )
def white_space_fix(_lowerCamelCase: Tuple ):
return " ".join(text.split() )
def remove_punc(_lowerCamelCase: Union[str, Any] ):
__lowerCamelCase : Optional[Any] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowerCamelCase: Optional[Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_lowerCamelCase ) ) ) )
def lowercase_ ( _lowerCamelCase: str , _lowerCamelCase: List[Any] ) -> Tuple:
'''simple docstring'''
return int(normalize_answer(_lowerCamelCase ) == normalize_answer(_lowerCamelCase ) )
def lowercase_ ( _lowerCamelCase: str , _lowerCamelCase: Tuple ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase : List[Any] = [any(compute_exact(_lowerCamelCase , _lowerCamelCase ) for ref in refs ) for pred, refs in zip(_lowerCamelCase , _lowerCamelCase )]
return (sum(_lowerCamelCase ) / len(_lowerCamelCase )) * 100
def lowercase_ ( _lowerCamelCase: Tuple , _lowerCamelCase: Optional[Any] , _lowerCamelCase: Any , _lowerCamelCase: List[Any] ) -> int:
'''simple docstring'''
__lowerCamelCase : int = [rgram for rgrams in rgramslist for rgram in rgrams]
__lowerCamelCase : int = Counter(_lowerCamelCase )
__lowerCamelCase : str = Counter(_lowerCamelCase )
__lowerCamelCase : Union[str, Any] = Counter()
for sgram, scount in sgramcounter.items():
__lowerCamelCase : List[str] = scount * numref
__lowerCamelCase : Tuple = Counter(_lowerCamelCase )
__lowerCamelCase : int = Counter()
for cgram, ccount in cgramcounter.items():
__lowerCamelCase : List[str] = ccount * numref
# KEEP
__lowerCamelCase : Tuple = sgramcounter_rep & cgramcounter_rep
__lowerCamelCase : Any = keepgramcounter_rep & rgramcounter
__lowerCamelCase : Union[str, Any] = sgramcounter_rep & rgramcounter
__lowerCamelCase : Optional[int] = 0
__lowerCamelCase : List[Any] = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__lowerCamelCase : int = 1
__lowerCamelCase : List[Any] = 1
if len(_lowerCamelCase ) > 0:
__lowerCamelCase : str = keeptmpscorea / len(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
__lowerCamelCase : List[str] = keeptmpscorea / sum(keepgramcounterall_rep.values() )
__lowerCamelCase : str = 0
if keepscore_precision > 0 or keepscore_recall > 0:
__lowerCamelCase : Any = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
__lowerCamelCase : List[Any] = sgramcounter_rep - cgramcounter_rep
__lowerCamelCase : Tuple = delgramcounter_rep - rgramcounter
__lowerCamelCase : int = sgramcounter_rep - rgramcounter
__lowerCamelCase : Any = 0
__lowerCamelCase : List[Any] = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__lowerCamelCase : Union[str, Any] = 1
if len(_lowerCamelCase ) > 0:
__lowerCamelCase : Dict = deltmpscorea / len(_lowerCamelCase )
# ADDITION
__lowerCamelCase : List[Any] = set(_lowerCamelCase ) - set(_lowerCamelCase )
__lowerCamelCase : List[Any] = set(_lowerCamelCase ) & set(_lowerCamelCase )
__lowerCamelCase : Tuple = set(_lowerCamelCase ) - set(_lowerCamelCase )
__lowerCamelCase : List[str] = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__lowerCamelCase : Optional[Any] = 1
__lowerCamelCase : int = 1
if len(_lowerCamelCase ) > 0:
__lowerCamelCase : Tuple = addtmpscore / len(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
__lowerCamelCase : str = addtmpscore / len(_lowerCamelCase )
__lowerCamelCase : Tuple = 0
if addscore_precision > 0 or addscore_recall > 0:
__lowerCamelCase : Tuple = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def lowercase_ ( _lowerCamelCase: List[Any] , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Optional[Any] ) -> str:
'''simple docstring'''
__lowerCamelCase : Optional[Any] = len(_lowerCamelCase )
__lowerCamelCase : List[str] = ssent.split(" " )
__lowerCamelCase : List[Any] = csent.split(" " )
__lowerCamelCase : int = []
__lowerCamelCase : Optional[int] = []
__lowerCamelCase : str = []
__lowerCamelCase : Dict = []
__lowerCamelCase : Optional[Any] = []
__lowerCamelCase : List[str] = []
__lowerCamelCase : int = []
__lowerCamelCase : Optional[Any] = []
__lowerCamelCase : Optional[Any] = []
__lowerCamelCase : Optional[Any] = []
for rsent in rsents:
__lowerCamelCase : List[Any] = rsent.split(" " )
__lowerCamelCase : List[str] = []
__lowerCamelCase : Optional[int] = []
__lowerCamelCase : Dict = []
ragramslist.append(_lowerCamelCase )
for i in range(0 , len(_lowerCamelCase ) - 1 ):
if i < len(_lowerCamelCase ) - 1:
__lowerCamelCase : int = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(_lowerCamelCase )
if i < len(_lowerCamelCase ) - 2:
__lowerCamelCase : str = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(_lowerCamelCase )
if i < len(_lowerCamelCase ) - 3:
__lowerCamelCase : List[Any] = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(_lowerCamelCase )
ragramslist.append(_lowerCamelCase )
ragramslist.append(_lowerCamelCase )
ragramslist.append(_lowerCamelCase )
for i in range(0 , len(_lowerCamelCase ) - 1 ):
if i < len(_lowerCamelCase ) - 1:
__lowerCamelCase : List[Any] = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(_lowerCamelCase )
if i < len(_lowerCamelCase ) - 2:
__lowerCamelCase : Optional[int] = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(_lowerCamelCase )
if i < len(_lowerCamelCase ) - 3:
__lowerCamelCase : Any = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(_lowerCamelCase )
for i in range(0 , len(_lowerCamelCase ) - 1 ):
if i < len(_lowerCamelCase ) - 1:
__lowerCamelCase : Dict = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(_lowerCamelCase )
if i < len(_lowerCamelCase ) - 2:
__lowerCamelCase : str = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(_lowerCamelCase )
if i < len(_lowerCamelCase ) - 3:
__lowerCamelCase : str = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(_lowerCamelCase )
(__lowerCamelCase) : Union[str, Any] = SARIngram(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
(__lowerCamelCase) : Optional[Any] = SARIngram(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
(__lowerCamelCase) : Dict = SARIngram(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
(__lowerCamelCase) : Tuple = SARIngram(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__lowerCamelCase : Union[str, Any] = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
__lowerCamelCase : int = sum([delascore, delascore, delascore, delascore] ) / 4
__lowerCamelCase : Optional[int] = sum([addascore, addascore, addascore, addascore] ) / 4
__lowerCamelCase : List[Any] = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def lowercase_ ( _lowerCamelCase: Dict , _lowerCamelCase: bool = True , _lowerCamelCase: str = "13a" , _lowerCamelCase: bool = True ) -> Optional[int]:
'''simple docstring'''
if lowercase:
__lowerCamelCase : Optional[Any] = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
__lowerCamelCase : List[str] = sacrebleu.metrics.bleu._get_tokenizer(_lowerCamelCase )()(_lowerCamelCase )
else:
__lowerCamelCase : Optional[Any] = sacrebleu.TOKENIZERS[tokenizer]()(_lowerCamelCase )
elif tokenizer == "moses":
__lowerCamelCase : Tuple = sacremoses.MosesTokenizer().tokenize(_lowerCamelCase , return_str=_lowerCamelCase , escape=_lowerCamelCase )
elif tokenizer == "penn":
__lowerCamelCase : Dict = sacremoses.MosesTokenizer().penn_tokenize(_lowerCamelCase , return_str=_lowerCamelCase )
else:
__lowerCamelCase : List[Any] = sentence
if not return_str:
__lowerCamelCase : str = normalized_sent.split()
return normalized_sent
def lowercase_ ( _lowerCamelCase: Optional[Any] , _lowerCamelCase: str , _lowerCamelCase: List[Any] ) -> Any:
'''simple docstring'''
if not (len(_lowerCamelCase ) == len(_lowerCamelCase ) == len(_lowerCamelCase )):
raise ValueError("Sources length must match predictions and references lengths." )
__lowerCamelCase : List[Any] = 0
for src, pred, refs in zip(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
sari_score += SARIsent(normalize(_lowerCamelCase ) , normalize(_lowerCamelCase ) , [normalize(_lowerCamelCase ) for sent in refs] )
__lowerCamelCase : List[Any] = sari_score / len(_lowerCamelCase )
return 100 * sari_score
def lowercase_ ( _lowerCamelCase: Tuple , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Tuple="exp" , _lowerCamelCase: int=None , _lowerCamelCase: Dict=False , _lowerCamelCase: str=False , _lowerCamelCase: int=False , ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase : List[Any] = len(references[0] )
if any(len(_lowerCamelCase ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
__lowerCamelCase : str = [[refs[i] for refs in references] for i in range(_lowerCamelCase )]
__lowerCamelCase : Any = sacrebleu.corpus_bleu(
_lowerCamelCase , _lowerCamelCase , smooth_method=_lowerCamelCase , smooth_value=_lowerCamelCase , force=_lowerCamelCase , lowercase=_lowerCamelCase , use_effective_order=_lowerCamelCase , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def lowerCamelCase__ ( self : List[str] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=[
"https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
"https://github.com/cocoxu/simplification/blob/master/SARI.py",
"https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
"https://github.com/mjpost/sacreBLEU",
] , reference_urls=[
"https://www.aclweb.org/anthology/Q16-1029.pdf",
"https://github.com/mjpost/sacreBLEU",
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
def lowerCamelCase__ ( self : str , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Optional[Any] ):
__lowerCamelCase : List[Any] = {}
result.update({"sari": compute_sari(sources=lowerCamelCase__ , predictions=lowerCamelCase__ , references=lowerCamelCase__ )} )
result.update({"sacrebleu": compute_sacrebleu(predictions=lowerCamelCase__ , references=lowerCamelCase__ )} )
result.update({"exact": compute_em(predictions=lowerCamelCase__ , references=lowerCamelCase__ )} )
return result
| 718
|
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _snake_case ( a__ , a__ ):
@register_to_config
def __init__( self : Optional[Any] , UpperCAmelCase : int = 128 , UpperCAmelCase : int = 256 , UpperCAmelCase : float = 2_0_0_0.0 , UpperCAmelCase : int = 768 , UpperCAmelCase : int = 12 , UpperCAmelCase : int = 12 , UpperCAmelCase : int = 64 , UpperCAmelCase : int = 2048 , UpperCAmelCase : float = 0.1 , ):
super().__init__()
__lowerCamelCase : List[Any] = nn.Sequential(
nn.Linear(UpperCAmelCase , d_model * 4 , bias=UpperCAmelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=UpperCAmelCase ) , nn.SiLU() , )
__lowerCamelCase : str = nn.Embedding(UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase : Optional[Any] = False
__lowerCamelCase : Optional[int] = nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
__lowerCamelCase : Optional[Any] = nn.Dropout(p=UpperCAmelCase )
__lowerCamelCase : str = nn.ModuleList()
for lyr_num in range(UpperCAmelCase ):
# FiLM conditional T5 decoder
__lowerCamelCase : List[str] = DecoderLayer(d_model=UpperCAmelCase , d_kv=UpperCAmelCase , num_heads=UpperCAmelCase , d_ff=UpperCAmelCase , dropout_rate=UpperCAmelCase )
self.decoders.append(UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = TaLayerNorm(UpperCAmelCase )
__lowerCamelCase : List[Any] = nn.Dropout(p=UpperCAmelCase )
__lowerCamelCase : Any = nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : int ):
__lowerCamelCase : List[Any] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : str ):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
__lowerCamelCase : str = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
__lowerCamelCase : Optional[int] = self.conditioning_emb(UpperCAmelCase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
__lowerCamelCase : Tuple = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
__lowerCamelCase : int = torch.broadcast_to(
torch.arange(UpperCAmelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
__lowerCamelCase : Optional[Any] = self.position_encoding(UpperCAmelCase )
__lowerCamelCase : List[str] = self.continuous_inputs_projection(UpperCAmelCase )
inputs += position_encodings
__lowerCamelCase : List[Any] = self.dropout(UpperCAmelCase )
# decoder: No padding present.
__lowerCamelCase : Tuple = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
__lowerCamelCase : Optional[Any] = [(x, self.encoder_decoder_mask(UpperCAmelCase , UpperCAmelCase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
__lowerCamelCase : Union[str, Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
__lowerCamelCase : Union[str, Any] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
__lowerCamelCase : List[Any] = lyr(
UpperCAmelCase , conditioning_emb=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , )[0]
__lowerCamelCase : Dict = self.decoder_norm(UpperCAmelCase )
__lowerCamelCase : Optional[int] = self.post_dropout(UpperCAmelCase )
__lowerCamelCase : str = self.spec_out(UpperCAmelCase )
return spec_out
class _snake_case ( nn.Module ):
def __init__( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[str]=1E-6 ):
super().__init__()
__lowerCamelCase : Union[str, Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=UpperCAmelCase , d_kv=UpperCAmelCase , num_heads=UpperCAmelCase , dropout_rate=UpperCAmelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=UpperCAmelCase , d_kv=UpperCAmelCase , num_heads=UpperCAmelCase , dropout_rate=UpperCAmelCase , layer_norm_epsilon=UpperCAmelCase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=UpperCAmelCase , d_ff=UpperCAmelCase , dropout_rate=UpperCAmelCase , layer_norm_epsilon=UpperCAmelCase ) )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple=None , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : Dict=None , ):
__lowerCamelCase : Union[str, Any] = self.layer[0](
UpperCAmelCase , conditioning_emb=UpperCAmelCase , attention_mask=UpperCAmelCase , )
if encoder_hidden_states is not None:
__lowerCamelCase : Tuple = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
__lowerCamelCase : Dict = self.layer[1](
UpperCAmelCase , key_value_states=UpperCAmelCase , attention_mask=UpperCAmelCase , )
# Apply Film Conditional Feed Forward layer
__lowerCamelCase : List[Any] = self.layer[-1](UpperCAmelCase , UpperCAmelCase )
return (hidden_states,)
class _snake_case ( nn.Module ):
def __init__( self : str , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict ):
super().__init__()
__lowerCamelCase : Union[str, Any] = TaLayerNorm(UpperCAmelCase )
__lowerCamelCase : Any = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCAmelCase )
__lowerCamelCase : Optional[Any] = Attention(query_dim=UpperCAmelCase , heads=UpperCAmelCase , dim_head=UpperCAmelCase , out_bias=UpperCAmelCase , scale_qk=UpperCAmelCase )
__lowerCamelCase : Tuple = nn.Dropout(UpperCAmelCase )
def lowerCamelCase__ ( self : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any]=None , UpperCAmelCase : str=None , ):
# pre_self_attention_layer_norm
__lowerCamelCase : int = self.layer_norm(UpperCAmelCase )
if conditioning_emb is not None:
__lowerCamelCase : Optional[Any] = self.FiLMLayer(UpperCAmelCase , UpperCAmelCase )
# Self-attention block
__lowerCamelCase : Optional[Any] = self.attention(UpperCAmelCase )
__lowerCamelCase : Dict = hidden_states + self.dropout(UpperCAmelCase )
return hidden_states
class _snake_case ( nn.Module ):
def __init__( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : List[str] ):
super().__init__()
__lowerCamelCase : str = Attention(query_dim=UpperCAmelCase , heads=UpperCAmelCase , dim_head=UpperCAmelCase , out_bias=UpperCAmelCase , scale_qk=UpperCAmelCase )
__lowerCamelCase : List[Any] = TaLayerNorm(UpperCAmelCase , eps=UpperCAmelCase )
__lowerCamelCase : str = nn.Dropout(UpperCAmelCase )
def lowerCamelCase__ ( self : str , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[int]=None , ):
__lowerCamelCase : str = self.layer_norm(UpperCAmelCase )
__lowerCamelCase : Dict = self.attention(
UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , attention_mask=attention_mask.squeeze(1 ) , )
__lowerCamelCase : List[str] = hidden_states + self.dropout(UpperCAmelCase )
return layer_output
class _snake_case ( nn.Module ):
def __init__( self : Dict , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] ):
super().__init__()
__lowerCamelCase : str = TaDenseGatedActDense(d_model=UpperCAmelCase , d_ff=UpperCAmelCase , dropout_rate=UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCAmelCase )
__lowerCamelCase : Dict = TaLayerNorm(UpperCAmelCase , eps=UpperCAmelCase )
__lowerCamelCase : Optional[Any] = nn.Dropout(UpperCAmelCase )
def lowerCamelCase__ ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any=None ):
__lowerCamelCase : int = self.layer_norm(UpperCAmelCase )
if conditioning_emb is not None:
__lowerCamelCase : Union[str, Any] = self.film(UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase : List[str] = self.DenseReluDense(UpperCAmelCase )
__lowerCamelCase : Optional[int] = hidden_states + self.dropout(UpperCAmelCase )
return hidden_states
class _snake_case ( nn.Module ):
def __init__( self : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] ):
super().__init__()
__lowerCamelCase : List[Any] = nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
__lowerCamelCase : List[Any] = nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
__lowerCamelCase : List[Any] = nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
__lowerCamelCase : str = nn.Dropout(UpperCAmelCase )
__lowerCamelCase : Optional[Any] = NewGELUActivation()
def lowerCamelCase__ ( self : Dict , UpperCAmelCase : List[Any] ):
__lowerCamelCase : Union[str, Any] = self.act(self.wi_a(UpperCAmelCase ) )
__lowerCamelCase : Any = self.wi_a(UpperCAmelCase )
__lowerCamelCase : Optional[Any] = hidden_gelu * hidden_linear
__lowerCamelCase : Any = self.dropout(UpperCAmelCase )
__lowerCamelCase : List[Any] = self.wo(UpperCAmelCase )
return hidden_states
class _snake_case ( nn.Module ):
def __init__( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : List[str]=1E-6 ):
super().__init__()
__lowerCamelCase : List[Any] = nn.Parameter(torch.ones(UpperCAmelCase ) )
__lowerCamelCase : Tuple = eps
def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : Any ):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
__lowerCamelCase : int = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=UpperCAmelCase )
__lowerCamelCase : Dict = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
__lowerCamelCase : Union[str, Any] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class _snake_case ( nn.Module ):
def lowerCamelCase__ ( self : str , UpperCAmelCase : torch.Tensor ):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(UpperCAmelCase , 3.0 )) ))
class _snake_case ( nn.Module ):
def __init__( self : Tuple , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] ):
super().__init__()
__lowerCamelCase : Any = nn.Linear(UpperCAmelCase , out_features * 2 , bias=UpperCAmelCase )
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any ):
__lowerCamelCase : Optional[Any] = self.scale_bias(UpperCAmelCase )
__lowerCamelCase , __lowerCamelCase : Dict = torch.chunk(UpperCAmelCase , 2 , -1 )
__lowerCamelCase : List[Any] = x * (1 + scale) + shift
return x
| 366
| 0
|
'''simple docstring'''
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_ ) -> None:
lowerCAmelCase__ = set_counts
lowerCAmelCase__ = max(lowerCamelCase_ )
lowerCAmelCase__ = len(lowerCamelCase_ )
lowerCAmelCase__ = [1] * num_sets
lowerCAmelCase__ = list(range(lowerCamelCase_ ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> bool:
lowerCAmelCase__ = self.get_parent(lowerCamelCase_ )
lowerCAmelCase__ = self.get_parent(lowerCamelCase_ )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
lowerCAmelCase__ = 0
lowerCAmelCase__ = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
lowerCAmelCase__ = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
lowerCAmelCase__ = 0
lowerCAmelCase__ = src_parent
lowerCAmelCase__ = self.set_counts[src_parent]
lowerCAmelCase__ = max(self.max_set , lowerCamelCase_ )
return True
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
if self.parents[disj_set] == disj_set:
return disj_set
lowerCAmelCase__ = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
| 90
|
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__A : List[Any] = logging.get_logger("transformers.models.speecht5")
__A : Optional[Any] = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
__A : Optional[int] = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
__A : List[str] = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
__A : List[Any] = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
__A : Union[str, Any] = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
__A : Any = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
__A : Union[str, Any] = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
__A : Optional[int] = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
__A : Union[str, Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__A : Optional[Any] = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__A : Optional[int] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__A : int = []
__A : int = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
__A : Optional[Any] = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
__A : Tuple = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
__A : Union[str, Any] = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def lowercase ( UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : int ):
"""simple docstring"""
for attribute in key.split("." ):
A__ : Dict =getattr(UpperCamelCase , UpperCamelCase )
if weight_type is not None:
A__ : Union[str, Any] =getattr(UpperCamelCase , UpperCamelCase ).shape
else:
A__ : Tuple =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
A__ : Any =value
elif weight_type == "weight_g":
A__ : Any =value
elif weight_type == "weight_v":
A__ : Any =value
elif weight_type == "bias":
A__ : Tuple =value
elif weight_type == "running_mean":
A__ : Dict =value
elif weight_type == "running_var":
A__ : List[str] =value
elif weight_type == "num_batches_tracked":
A__ : Dict =value
else:
A__ : Optional[int] =value
logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def lowercase ( UpperCamelCase : Tuple , UpperCamelCase : Tuple ):
"""simple docstring"""
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
A__ , A__ : List[str] =key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def lowercase ( UpperCamelCase : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : Dict ):
"""simple docstring"""
A__ : Tuple =[]
if task == "s2t":
A__ : Dict =hf_model.speechta.encoder.prenet.feature_encoder
A__ : int =MAPPING_S2T
A__ : List[Any] =IGNORE_KEYS_S2T
elif task == "t2s":
A__ : Union[str, Any] =None
A__ : List[Any] =MAPPING_T2S
A__ : Tuple =IGNORE_KEYS_T2S
elif task == "s2s":
A__ : Optional[Any] =hf_model.speechta.encoder.prenet.feature_encoder
A__ : Tuple =MAPPING_S2S
A__ : Any =IGNORE_KEYS_S2S
else:
raise ValueError(F'''Unsupported task: {task}''' )
for name, value in fairseq_dict.items():
if should_ignore(UpperCamelCase , UpperCamelCase ):
logger.info(F'''{name} was ignored''' )
continue
A__ : Optional[Any] =False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , hf_model.config.feat_extract_norm == "group" , )
A__ : List[Any] =True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
A__ , A__ : Dict =key.split(".*." )
if prefix in name and suffix in name:
A__ : int =suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
A__ : List[Any] =True
if "*" in mapped_key:
A__ : Optional[int] =name.split(UpperCamelCase )[0].split("." )[-2]
A__ : int =mapped_key.replace("*" , UpperCamelCase )
if "weight_g" in name:
A__ : str ="weight_g"
elif "weight_v" in name:
A__ : Optional[Any] ="weight_v"
elif "bias" in name:
A__ : Any ="bias"
elif "weight" in name:
A__ : Optional[int] ="weight"
elif "running_mean" in name:
A__ : Tuple ="running_mean"
elif "running_var" in name:
A__ : Optional[int] ="running_var"
elif "num_batches_tracked" in name:
A__ : str ="num_batches_tracked"
else:
A__ : List[Any] =None
set_recursively(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
continue
if not is_used:
unused_weights.append(UpperCamelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def lowercase ( UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : Dict ):
"""simple docstring"""
A__ : Any =full_name.split("conv_layers." )[-1]
A__ : Dict =name.split("." )
A__ : int =int(items[0] )
A__ : str =int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
A__ : Optional[Any] =value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
A__ : Optional[int] =value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
A__ : Any =value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
A__ : Any =value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCamelCase )
@torch.no_grad()
def lowercase ( UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : str=None , UpperCamelCase : Any=None , UpperCamelCase : Tuple=None , ):
"""simple docstring"""
if config_path is not None:
A__ : Any =SpeechTaConfig.from_pretrained(UpperCamelCase )
else:
A__ : Any =SpeechTaConfig()
if task == "s2t":
A__ : Union[str, Any] =config.max_text_positions
A__ : Dict =SpeechTaForSpeechToText(UpperCamelCase )
elif task == "t2s":
A__ : str =1876
A__ : Optional[int] =600
A__ : Tuple =config.max_speech_positions
A__ : Optional[Any] =SpeechTaForTextToSpeech(UpperCamelCase )
elif task == "s2s":
A__ : str =1876
A__ : Tuple =config.max_speech_positions
A__ : Any =SpeechTaForSpeechToSpeech(UpperCamelCase )
else:
raise ValueError(F'''Unknown task name: {task}''' )
if vocab_path:
A__ : str =SpeechTaTokenizer(UpperCamelCase , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
A__ : Optional[Any] =AddedToken("<mask>" , lstrip=UpperCamelCase , rstrip=UpperCamelCase )
A__ : int =mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
A__ : Dict =SpeechTaFeatureExtractor()
A__ : Tuple =SpeechTaProcessor(tokenizer=UpperCamelCase , feature_extractor=UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
A__ : Union[str, Any] =torch.load(UpperCamelCase )
recursively_load_weights(fairseq_checkpoint["model"] , UpperCamelCase , UpperCamelCase )
model.save_pretrained(UpperCamelCase )
if repo_id:
print("Pushing to the hub..." )
processor.push_to_hub(UpperCamelCase )
model.push_to_hub(UpperCamelCase )
if __name__ == "__main__":
__A : Dict = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
__A : str = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 656
| 0
|
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
__lowerCamelCase : Optional[Any] = '''
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
'''
__lowerCamelCase : List[Any] = '''
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{\'recall\': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{\'recall\': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{\'recall\': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'recall\': array([1., 0., 0.])}
'''
__lowerCamelCase : List[Any] = '''
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
def __a ( self : Optional[Any] ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def __a ( self : Any , _lowercase : int , _lowercase : Dict , _lowercase : str=None , _lowercase : List[Any]=1 , _lowercase : Tuple="binary" , _lowercase : Tuple=None , _lowercase : Tuple="warn" , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = recall_score(
__lowerCamelCase , __lowerCamelCase , labels=__lowerCamelCase , pos_label=__lowerCamelCase , average=__lowerCamelCase , sample_weight=__lowerCamelCase , zero_division=__lowerCamelCase , )
return {"recall": float(__lowerCamelCase ) if score.size == 1 else score}
| 717
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__lowerCamelCase : Union[str, Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class __snake_case ( datasets.BuilderConfig ):
lowerCAmelCase_ = 1_00_00
lowerCAmelCase_ = None
lowerCAmelCase_ = None
class __snake_case ( datasets.ArrowBasedBuilder ):
lowerCAmelCase_ = ParquetConfig
def __a ( self : List[str] ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def __a ( self : Dict , _lowercase : List[str] ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
SCREAMING_SNAKE_CASE__ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowercase , (str, list, tuple) ):
SCREAMING_SNAKE_CASE__ = data_files
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
SCREAMING_SNAKE_CASE__ = [dl_manager.iter_files(_lowercase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
SCREAMING_SNAKE_CASE__ = []
for split_name, files in data_files.items():
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
SCREAMING_SNAKE_CASE__ = [dl_manager.iter_files(_lowercase ) for file in files]
# Infer features is they are stoed in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(_lowercase ):
with open(_lowercase , """rb""" ) as f:
SCREAMING_SNAKE_CASE__ = datasets.Features.from_arrow_schema(pq.read_schema(_lowercase ) )
break
splits.append(datasets.SplitGenerator(name=_lowercase , gen_kwargs={"""files""": files} ) )
return splits
def __a ( self : Optional[Any] , _lowercase : pa.Table ):
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
SCREAMING_SNAKE_CASE__ = table_cast(_lowercase , self.info.features.arrow_schema )
return pa_table
def __a ( self : Dict , _lowercase : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(_lowercase ) ):
with open(_lowercase , """rb""" ) as f:
SCREAMING_SNAKE_CASE__ = pq.ParquetFile(_lowercase )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
SCREAMING_SNAKE_CASE__ = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"""{file_idx}_{batch_idx}""", self._cast_table(_lowercase )
except ValueError as e:
logger.error(f"""Failed to read file '{file}' with error {type(_lowercase )}: {e}""" )
raise
| 379
| 0
|
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCamelCase ( self : Any ) -> int:
_UpperCamelCase = ['''a''', '''b''', '''c''']
# Defaults to last layer if both are None
_UpperCamelCase , _UpperCamelCase = get_aligned_output_features_output_indices(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
self.assertEqual(__UpperCamelCase , ['''c'''] )
self.assertEqual(__UpperCamelCase , [2] )
# Out indices set to match out features
_UpperCamelCase , _UpperCamelCase = get_aligned_output_features_output_indices(['''a''', '''c'''] , __UpperCamelCase , __UpperCamelCase )
self.assertEqual(__UpperCamelCase , ['''a''', '''c'''] )
self.assertEqual(__UpperCamelCase , [0, 2] )
# Out features set to match out indices
_UpperCamelCase , _UpperCamelCase = get_aligned_output_features_output_indices(__UpperCamelCase , [0, 2] , __UpperCamelCase )
self.assertEqual(__UpperCamelCase , ['''a''', '''c'''] )
self.assertEqual(__UpperCamelCase , [0, 2] )
# Out features selected from negative indices
_UpperCamelCase , _UpperCamelCase = get_aligned_output_features_output_indices(__UpperCamelCase , [-3, -1] , __UpperCamelCase )
self.assertEqual(__UpperCamelCase , ['''a''', '''c'''] )
self.assertEqual(__UpperCamelCase , [-3, -1] )
def _UpperCamelCase ( self : Dict ) -> Any:
# Stage names must be set
with self.assertRaises(__UpperCamelCase ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , __UpperCamelCase )
# Out features must be a list
with self.assertRaises(__UpperCamelCase ):
verify_out_features_out_indices(('''a''', '''b''') , (0, 1) , ['''a''', '''b'''] )
# Out features must be a subset of stage names
with self.assertRaises(__UpperCamelCase ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , ['''a'''] )
# Out indices must be a list or tuple
with self.assertRaises(__UpperCamelCase ):
verify_out_features_out_indices(__UpperCamelCase , 0 , ['''a''', '''b'''] )
# Out indices must be a subset of stage names
with self.assertRaises(__UpperCamelCase ):
verify_out_features_out_indices(__UpperCamelCase , (0, 1) , ['''a'''] )
# Out features and out indices must be the same length
with self.assertRaises(__UpperCamelCase ):
verify_out_features_out_indices(['''a''', '''b'''] , (0,) , ['''a''', '''b''', '''c'''] )
# Out features should match out indices
with self.assertRaises(__UpperCamelCase ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 2) , ['''a''', '''b''', '''c'''] )
# Out features and out indices should be in order
with self.assertRaises(__UpperCamelCase ):
verify_out_features_out_indices(['''b''', '''a'''] , (0, 1) , ['''a''', '''b'''] )
# Check passes with valid inputs
verify_out_features_out_indices(['''a''', '''b''', '''d'''] , (0, 1, -1) , ['''a''', '''b''', '''c''', '''d'''] )
def _UpperCamelCase ( self : str ) -> Optional[int]:
_UpperCamelCase = BackboneMixin()
_UpperCamelCase = ['''a''', '''b''', '''c''']
_UpperCamelCase = ['''a''', '''c''']
_UpperCamelCase = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
_UpperCamelCase = ['''a''', '''b''']
self.assertEqual(backbone.out_features , ['''a''', '''b'''] )
self.assertEqual(backbone.out_indices , [0, 1] )
_UpperCamelCase = [-3, -1]
self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 420
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class UpperCAmelCase_ ( _lowercase):
snake_case__ = '''gpt_neox'''
def __init__( self : Dict , __UpperCamelCase : int=5_0432 , __UpperCamelCase : List[Any]=6144 , __UpperCamelCase : str=44 , __UpperCamelCase : List[str]=64 , __UpperCamelCase : int=2_4576 , __UpperCamelCase : Tuple="gelu" , __UpperCamelCase : Dict=0.2_5 , __UpperCamelCase : int=1_0000 , __UpperCamelCase : Optional[Any]=0.0 , __UpperCamelCase : List[Any]=0.0 , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : Dict=2048 , __UpperCamelCase : Optional[Any]=0.0_2 , __UpperCamelCase : Optional[Any]=1E-5 , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Optional[int]=0 , __UpperCamelCase : Union[str, Any]=2 , __UpperCamelCase : List[Any]=False , __UpperCamelCase : List[str]=True , __UpperCamelCase : Optional[Any]=None , **__UpperCamelCase : Union[str, Any] , ) -> Union[str, Any]:
super().__init__(bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase )
_UpperCamelCase = vocab_size
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = rotary_pct
_UpperCamelCase = rotary_emb_base
_UpperCamelCase = attention_dropout
_UpperCamelCase = hidden_dropout
_UpperCamelCase = classifier_dropout
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = use_cache
_UpperCamelCase = tie_word_embeddings
_UpperCamelCase = use_parallel_residual
_UpperCamelCase = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
'''The hidden size is not divisble by the number of attention heads! Make sure to update them!''' )
def _UpperCamelCase ( self : Optional[int] ) -> Tuple:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __UpperCamelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
F'''got {self.rope_scaling}''' )
_UpperCamelCase = self.rope_scaling.get('''type''' , __UpperCamelCase )
_UpperCamelCase = self.rope_scaling.get('''factor''' , __UpperCamelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(__UpperCamelCase , __UpperCamelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(F'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
| 420
| 1
|
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def _lowercase ( lowerCamelCase__ : List[str] ):
def wrapper(*lowerCamelCase__ : Tuple, **lowerCamelCase__ : Tuple ):
_a = timeit.default_timer()
_a = func(*lowerCamelCase__, **lowerCamelCase__ )
_a = timeit.default_timer() - starttime
return delta
_a = func.__name__
return wrapper
def _lowercase ( lowerCamelCase__ : dict, lowerCamelCase__ : int=100, lowerCamelCase__ : Optional[Any]=None ):
_a = []
_a = seq_shapes or {}
for i in range(lowerCamelCase__ ):
_a = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCamelCase__, _ArrayXD ):
_a = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCamelCase__, datasets.Value ):
if v.dtype == "string":
_a = "The small grey turtle was surprisingly fast when challenged."
else:
_a = np.random.randint(10, size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCamelCase__, datasets.Sequence ):
while isinstance(lowerCamelCase__, datasets.Sequence ):
_a = v.feature
_a = seq_shapes[k]
_a = np.random.rand(*lowerCamelCase__ ).astype(v.dtype )
_a = data
dummy_data.append((i, example) )
return dummy_data
def _lowercase ( lowerCamelCase__ : Optional[int], lowerCamelCase__ : Tuple, lowerCamelCase__ : Optional[Any]=100, lowerCamelCase__ : Optional[Any]=None ):
_a = generate_examples(lowerCamelCase__, num_examples=lowerCamelCase__, seq_shapes=lowerCamelCase__ )
with ArrowWriter(features=lowerCamelCase__, path=lowerCamelCase__ ) as writer:
for key, record in dummy_data:
_a = features.encode_example(lowerCamelCase__ )
writer.write(lowerCamelCase__ )
_a , _a = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F'''Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.''' )
_a = datasets.Dataset.from_file(filename=lowerCamelCase__, info=datasets.DatasetInfo(features=lowerCamelCase__ ) )
return dataset
| 691
|
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def _lowercase ( ):
_a = argparse.ArgumentParser()
parser.add_argument("--model_ckpt", type=lowerCamelCase__, default="microsoft/unixcoder-base-nine" )
parser.add_argument("--num_epochs", type=lowerCamelCase__, default=5 )
parser.add_argument("--batch_size", type=lowerCamelCase__, default=6 )
parser.add_argument("--gradient_accumulation_steps", type=lowerCamelCase__, default=1 )
parser.add_argument("--freeze", type=lowerCamelCase__, default=lowerCamelCase__ )
parser.add_argument("--learning_rate", type=lowerCamelCase__, default=5e-4 )
parser.add_argument("--seed", type=lowerCamelCase__, default=0 )
parser.add_argument("--lr_scheduler_type", type=lowerCamelCase__, default="cosine" )
parser.add_argument("--num_warmup_steps", type=lowerCamelCase__, default=10 )
parser.add_argument("--weight_decay", type=lowerCamelCase__, default=0.01 )
parser.add_argument("--output_dir", type=lowerCamelCase__, default="./results" )
return parser.parse_args()
__snake_case : str = load("accuracy")
def _lowercase ( lowerCamelCase__ : List[str] ):
_a , _a = eval_pred
_a = np.argmax(lowerCamelCase__, axis=1 )
return metric.compute(predictions=lowerCamelCase__, references=lowerCamelCase__ )
class A ( a ):
def __init__( self , snake_case_ ) -> None:
super().__init__()
_a = trainer
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Optional[int]:
if control.should_evaluate:
_a = deepcopy(snake_case_ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
return control_copy
def _lowercase ( ):
_a = get_args()
set_seed(args.seed )
_a = load_dataset("codeparrot/codecomplex", split="train" )
_a = dataset.train_test_split(test_size=0.2 )
_a = train_test["test"].train_test_split(test_size=0.5 )
_a = DatasetDict(
{
"train": train_test["train"],
"test": test_validation["train"],
"valid": test_validation["test"],
} )
print("Loading tokenizer and model" )
_a = AutoTokenizer.from_pretrained(args.model_ckpt )
_a = tokenizer.eos_token
_a = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 )
_a = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
_a = False
_a = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"] ) ) )
def tokenize(lowerCamelCase__ : Tuple ):
_a = tokenizer(example["src"], truncation=lowerCamelCase__, max_length=1_024 )
_a = labels.straint(example["complexity"] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
_a = train_test_validation.map(
lowerCamelCase__, batched=lowerCamelCase__, remove_columns=train_test_validation["train"].column_names, )
_a = DataCollatorWithPadding(tokenizer=lowerCamelCase__ )
_a = TrainingArguments(
output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb", )
_a = Trainer(
model=lowerCamelCase__, args=lowerCamelCase__, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=lowerCamelCase__, data_collator=lowerCamelCase__, compute_metrics=lowerCamelCase__, )
print("Training..." )
trainer.add_callback(CustomCallback(lowerCamelCase__ ) )
trainer.train()
if __name__ == "__main__":
main()
| 691
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
class snake_case ( lowercase_ ):
"""simple docstring"""
_a = ["""pixel_values"""]
def __init__( self, _lowercase = True, _lowercase = None, _lowercase = PILImageResampling.BILINEAR, _lowercase = True, _lowercase = None, _lowercase = True, _lowercase = 1 / 255, _lowercase = True, _lowercase = None, _lowercase = None, **_lowercase, ) -> None:
super().__init__(**_lowercase )
SCREAMING_SNAKE_CASE_ = size if size is not None else {'shortest_edge': 256}
SCREAMING_SNAKE_CASE_ = get_size_dict(_lowercase, default_to_square=_lowercase )
SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE_ = get_size_dict(_lowercase, param_name='crop_size' )
SCREAMING_SNAKE_CASE_ = do_resize
SCREAMING_SNAKE_CASE_ = size
SCREAMING_SNAKE_CASE_ = resample
SCREAMING_SNAKE_CASE_ = do_center_crop
SCREAMING_SNAKE_CASE_ = crop_size
SCREAMING_SNAKE_CASE_ = do_rescale
SCREAMING_SNAKE_CASE_ = rescale_factor
SCREAMING_SNAKE_CASE_ = do_normalize
SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a__ ( self, _lowercase, _lowercase, _lowercase = PILImageResampling.BICUBIC, _lowercase = None, **_lowercase, ) -> np.ndarray:
SCREAMING_SNAKE_CASE_ = get_size_dict(_lowercase, default_to_square=_lowercase )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE_ = get_resize_output_image_size(_lowercase, size=size['shortest_edge'], default_to_square=_lowercase )
return resize(_lowercase, size=_lowercase, resample=_lowercase, data_format=_lowercase, **_lowercase )
def a__ ( self, _lowercase, _lowercase, _lowercase = None, **_lowercase, ) -> np.ndarray:
SCREAMING_SNAKE_CASE_ = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(_lowercase, size=(size['height'], size['width']), data_format=_lowercase, **_lowercase )
def a__ ( self, _lowercase, _lowercase, _lowercase = None, **_lowercase ) -> np.ndarray:
return rescale(_lowercase, scale=_lowercase, data_format=_lowercase, **_lowercase )
def a__ ( self, _lowercase, _lowercase, _lowercase, _lowercase = None, **_lowercase, ) -> np.ndarray:
return normalize(_lowercase, mean=_lowercase, std=_lowercase, data_format=_lowercase, **_lowercase )
def a__ ( self, _lowercase, _lowercase = None, _lowercase = None, _lowercase = None, _lowercase = None, _lowercase = None, _lowercase = None, _lowercase = None, _lowercase = None, _lowercase = None, _lowercase = None, _lowercase = None, _lowercase = ChannelDimension.FIRST, **_lowercase, ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ = get_size_dict(_lowercase, default_to_square=_lowercase )
SCREAMING_SNAKE_CASE_ = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_ = get_size_dict(_lowercase, param_name='crop_size' )
SCREAMING_SNAKE_CASE_ = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ = [self.resize(image=_lowercase, size=_lowercase, resample=_lowercase ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_ = [self.center_crop(image=_lowercase, size=_lowercase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_ = [self.rescale(image=_lowercase, scale=_lowercase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ = [self.normalize(image=_lowercase, mean=_lowercase, std=_lowercase ) for image in images]
SCREAMING_SNAKE_CASE_ = [to_channel_dimension_format(_lowercase, _lowercase ) for image in images]
SCREAMING_SNAKE_CASE_ = {'pixel_values': images}
return BatchFeature(data=_lowercase, tensor_type=_lowercase )
def a__ ( self, _lowercase, _lowercase = None ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(_lowercase ):
SCREAMING_SNAKE_CASE_ = target_sizes.numpy()
SCREAMING_SNAKE_CASE_ = []
for idx in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE_ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode='bilinear', align_corners=_lowercase )
SCREAMING_SNAKE_CASE_ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
SCREAMING_SNAKE_CASE_ = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 294
|
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
SCREAMING_SNAKE_CASE : List[Any] = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def _UpperCamelCase ( lowerCAmelCase__: Dict ,lowerCAmelCase__: int ,lowerCAmelCase__: Tuple=None ) -> Union[str, Any]:
if rng is None:
SCREAMING_SNAKE_CASE_ = random.Random()
SCREAMING_SNAKE_CASE_ = 1
for dim in shape:
total_dims *= dim
SCREAMING_SNAKE_CASE_ = []
for _ in range(lowerCAmelCase__ ):
values.append(rng.randint(0 ,vocab_size - 1 ) )
SCREAMING_SNAKE_CASE_ = np.array(lowerCAmelCase__ ,dtype=jnp.intaa ).reshape(lowerCAmelCase__ )
return output
def _UpperCamelCase ( lowerCAmelCase__: Tuple ,lowerCAmelCase__: str=None ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = ids_tensor(lowerCAmelCase__ ,vocab_size=2 ,rng=lowerCAmelCase__ )
# make sure that at least one token is attended to for each batch
SCREAMING_SNAKE_CASE_ = 1
return attn_mask
@require_flax
class snake_case :
"""simple docstring"""
_a = None
_a = ()
def a__ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = inputs['input_ids'].shape[-1] // 2
SCREAMING_SNAKE_CASE_ = inputs['input_ids'][:max_batch_size, :sequence_length]
SCREAMING_SNAKE_CASE_ = jnp.ones_like(_lowercase )
SCREAMING_SNAKE_CASE_ = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
SCREAMING_SNAKE_CASE_ = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
SCREAMING_SNAKE_CASE_ = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def a__ ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = max_length
SCREAMING_SNAKE_CASE_ = 0
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
SCREAMING_SNAKE_CASE_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE_ = getattr(_lowercase, _lowercase )
SCREAMING_SNAKE_CASE_ = pt_model_class(_lowercase ).eval()
SCREAMING_SNAKE_CASE_ = load_flax_weights_in_pytorch_model(_lowercase, flax_model.params )
SCREAMING_SNAKE_CASE_ = flax_model.generate(_lowercase ).sequences
SCREAMING_SNAKE_CASE_ = pt_model.generate(torch.tensor(_lowercase, dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
SCREAMING_SNAKE_CASE_ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist() )
def a__ ( self ) -> Any:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
SCREAMING_SNAKE_CASE_ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1], _lowercase )
SCREAMING_SNAKE_CASE_ = jit(model.generate )
SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
def a__ ( self ) -> int:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
SCREAMING_SNAKE_CASE_ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1], _lowercase )
SCREAMING_SNAKE_CASE_ = jit(model.generate )
SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
def a__ ( self ) -> Any:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = max_length
SCREAMING_SNAKE_CASE_ = 2
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
SCREAMING_SNAKE_CASE_ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1], _lowercase )
SCREAMING_SNAKE_CASE_ = jit(model.generate )
SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
def a__ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = max_length
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 2
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
SCREAMING_SNAKE_CASE_ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences )
def a__ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = max_length
SCREAMING_SNAKE_CASE_ = 0.8
SCREAMING_SNAKE_CASE_ = 10
SCREAMING_SNAKE_CASE_ = 0.3
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 8
SCREAMING_SNAKE_CASE_ = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
SCREAMING_SNAKE_CASE_ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1], _lowercase )
SCREAMING_SNAKE_CASE_ = jit(model.generate )
SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
def a__ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_ = max_length
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 8
SCREAMING_SNAKE_CASE_ = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
SCREAMING_SNAKE_CASE_ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1], _lowercase )
SCREAMING_SNAKE_CASE_ = jit(model.generate )
SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
def a__ ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_ = max_length
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 8
SCREAMING_SNAKE_CASE_ = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
SCREAMING_SNAKE_CASE_ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1], _lowercase )
SCREAMING_SNAKE_CASE_ = jit(model.generate )
SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
def a__ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE_ = attention_mask.at[(0, 0)].set(0 )
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
SCREAMING_SNAKE_CASE_ = model.generate(_lowercase, attention_mask=_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1], _lowercase )
SCREAMING_SNAKE_CASE_ = jit(model.generate )
SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase, attention_mask=_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
def a__ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE_ = attention_mask.at[(0, 0)].set(0 )
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
SCREAMING_SNAKE_CASE_ = model.generate(_lowercase, attention_mask=_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1], _lowercase )
SCREAMING_SNAKE_CASE_ = jit(model.generate )
SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase, attention_mask=_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
def a__ ( self ) -> int:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE_ = attention_mask.at[(0, 0)].set(0 )
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowercase )
SCREAMING_SNAKE_CASE_ = model.generate(_lowercase, attention_mask=_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1], _lowercase )
SCREAMING_SNAKE_CASE_ = jit(model.generate )
SCREAMING_SNAKE_CASE_ = jit_generate(_lowercase, attention_mask=_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
@require_flax
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert' )
SCREAMING_SNAKE_CASE_ = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
SCREAMING_SNAKE_CASE_ = 'Hello world'
SCREAMING_SNAKE_CASE_ = tokenizer(_lowercase, return_tensors='np' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(_lowercase, 'do_samples' ):
model.generate(_lowercase, do_samples=_lowercase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(_lowercase, 'foo' ):
SCREAMING_SNAKE_CASE_ = {'foo': 'bar'}
model.generate(_lowercase, **_lowercase )
| 294
| 1
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A , ) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative in a semiconductor' )
elif hole_conc < 0:
raise ValueError('Hole concentration cannot be negative in a semiconductor' )
elif intrinsic_conc < 0:
raise ValueError(
'Intrinsic concentration cannot be negative in a semiconductor' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 542
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase : str = {
"configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Any = [
"NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
"NezhaForNextSentencePrediction",
"NezhaForMaskedLM",
"NezhaForPreTraining",
"NezhaForMultipleChoice",
"NezhaForQuestionAnswering",
"NezhaForSequenceClassification",
"NezhaForTokenClassification",
"NezhaModel",
"NezhaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 542
| 1
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class A_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_lowerCamelCase : Tuple = StableDiffusionPanoramaPipeline
_lowerCamelCase : str = TEXT_TO_IMAGE_PARAMS
_lowerCamelCase : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
_lowerCamelCase : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowercase ( self : List[Any] ):
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
_UpperCAmelCase = DDIMScheduler()
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
_UpperCAmelCase = CLIPTextModel(snake_case_ )
_UpperCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowercase ( self : Optional[int] , snake_case_ : Optional[int] , snake_case_ : List[Any]=0 ):
_UpperCAmelCase = torch.manual_seed(snake_case_ )
_UpperCAmelCase = {
"prompt": "a photo of the dolomites",
"generator": generator,
# Setting height and width to None to prevent OOMs on CPU.
"height": None,
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def lowercase ( self : int ):
_UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionPanoramaPipeline(**snake_case_ )
_UpperCAmelCase = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_UpperCAmelCase = self.get_dummy_inputs(snake_case_ )
_UpperCAmelCase = sd_pipe(**snake_case_ ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_UpperCAmelCase = np.array([0.6_1_8_6, 0.5_3_7_4, 0.4_9_1_5, 0.4_1_3_5, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_7, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase ( self : List[Any] ):
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase ( self : Optional[int] ):
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 )
def lowercase ( self : Any ):
_UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionPanoramaPipeline(**snake_case_ )
_UpperCAmelCase = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_UpperCAmelCase = self.get_dummy_inputs(snake_case_ )
_UpperCAmelCase = "french fries"
_UpperCAmelCase = sd_pipe(**snake_case_ , negative_prompt=snake_case_ )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_UpperCAmelCase = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionPanoramaPipeline(**snake_case_ )
_UpperCAmelCase = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_UpperCAmelCase = self.get_dummy_inputs(snake_case_ )
_UpperCAmelCase = sd_pipe(**snake_case_ , view_batch_size=2 )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_UpperCAmelCase = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase ( self : Tuple ):
_UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" )
_UpperCAmelCase = StableDiffusionPanoramaPipeline(**snake_case_ )
_UpperCAmelCase = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_UpperCAmelCase = self.get_dummy_inputs(snake_case_ )
_UpperCAmelCase = sd_pipe(**snake_case_ ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_UpperCAmelCase = np.array([0.4_0_2_4, 0.6_5_1_0, 0.4_9_0_1, 0.5_3_7_8, 0.5_8_1_3, 0.5_6_2_2, 0.4_7_9_5, 0.4_4_6_7, 0.4_9_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase ( self : Dict ):
_UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = PNDMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , skip_prk_steps=snake_case_ )
_UpperCAmelCase = StableDiffusionPanoramaPipeline(**snake_case_ )
_UpperCAmelCase = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_UpperCAmelCase = self.get_dummy_inputs(snake_case_ )
_UpperCAmelCase = sd_pipe(**snake_case_ ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_UpperCAmelCase = np.array([0.6_3_9_1, 0.6_2_9_1, 0.4_8_6_1, 0.5_1_3_4, 0.5_5_5_2, 0.4_5_7_8, 0.5_0_3_2, 0.5_0_2_3, 0.4_5_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
def lowercase ( self : Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : List[Any] , snake_case_ : Optional[Any]=0 ):
_UpperCAmelCase = torch.manual_seed(snake_case_ )
_UpperCAmelCase = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowercase ( self : Optional[Any] ):
_UpperCAmelCase = "stabilityai/stable-diffusion-2-base"
_UpperCAmelCase = DDIMScheduler.from_pretrained(snake_case_ , subfolder="scheduler" )
_UpperCAmelCase = StableDiffusionPanoramaPipeline.from_pretrained(snake_case_ , scheduler=snake_case_ , safety_checker=snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
_UpperCAmelCase = self.get_inputs()
_UpperCAmelCase = pipe(**snake_case_ ).images
_UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 2_0_4_8, 3)
_UpperCAmelCase = np.array(
[
0.3_6_9_6_8_3_9_2,
0.2_7_0_2_5_3_7_2,
0.3_2_4_4_6_7_6_6,
0.2_8_3_7_9_3_8_7,
0.3_6_3_6_3_2_7_4,
0.3_0_7_3_3_3_4_7,
0.2_7_1_0_0_0_2_7,
0.2_7_0_5_4_1_2_5,
0.2_5_5_3_6_0_9_6,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def lowercase ( self : List[str] ):
_UpperCAmelCase = StableDiffusionPanoramaPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base" , safety_checker=snake_case_ )
_UpperCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
_UpperCAmelCase = self.get_inputs()
_UpperCAmelCase = pipe(**snake_case_ ).images
_UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 2_0_4_8, 3)
_UpperCAmelCase = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase ( self : Tuple ):
_UpperCAmelCase = 0
def callback_fn(snake_case_ : int , snake_case_ : int , snake_case_ : torch.FloatTensor ) -> None:
_UpperCAmelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_UpperCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 2_5_6)
_UpperCAmelCase = latents[0, -3:, -3:, -1]
_UpperCAmelCase = np.array(
[
0.1_8_6_8_1_8_6_9,
0.3_3_9_0_7_8_1_6,
0.5_3_6_1_2_7_6,
0.1_4_4_3_2_8_6_5,
-0.0_2_8_5_6_6_1_1,
-0.7_3_9_4_1_1_2_3,
0.2_3_3_9_7_9_8_7,
0.4_7_3_2_2_6_8_2,
-0.3_7_8_2_3_1_6_4,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
_UpperCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 2_5_6)
_UpperCAmelCase = latents[0, -3:, -3:, -1]
_UpperCAmelCase = np.array(
[
0.1_8_5_3_9_6_4_5,
0.3_3_9_8_7_2_4_8,
0.5_3_7_8_5_5_9,
0.1_4_4_3_7_1_4_2,
-0.0_2_4_5_5_2_6_1,
-0.7_3_3_8_3_1_7,
0.2_3_9_9_0_7_5_5,
0.4_7_3_5_6_2_7_2,
-0.3_7_8_6_5_0_5,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
_UpperCAmelCase = False
_UpperCAmelCase = "stabilityai/stable-diffusion-2-base"
_UpperCAmelCase = DDIMScheduler.from_pretrained(snake_case_ , subfolder="scheduler" )
_UpperCAmelCase = StableDiffusionPanoramaPipeline.from_pretrained(snake_case_ , scheduler=snake_case_ , safety_checker=snake_case_ )
_UpperCAmelCase = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
_UpperCAmelCase = self.get_inputs()
pipe(**snake_case_ , callback=snake_case_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase ( self : List[str] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase = "stabilityai/stable-diffusion-2-base"
_UpperCAmelCase = DDIMScheduler.from_pretrained(snake_case_ , subfolder="scheduler" )
_UpperCAmelCase = StableDiffusionPanoramaPipeline.from_pretrained(snake_case_ , scheduler=snake_case_ , safety_checker=snake_case_ )
_UpperCAmelCase = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase = self.get_inputs()
_UpperCAmelCase = pipe(**snake_case_ )
_UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 1_0**9
| 236
|
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def UpperCAmelCase_ ( __lowercase : int ) -> Optional[Any]:
'''simple docstring'''
return {key.lstrip("-" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def UpperCAmelCase_ ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = ArgumentParser(
"HuggingFace Datasets CLI tool" , usage="datasets-cli <command> [<args>]" , allow_abbrev=__lowercase )
_UpperCAmelCase = parser.add_subparsers(help="datasets-cli command helpers" )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(__lowercase )
EnvironmentCommand.register_subcommand(__lowercase )
TestCommand.register_subcommand(__lowercase )
RunBeamCommand.register_subcommand(__lowercase )
DummyDataCommand.register_subcommand(__lowercase )
# Parse args
_UpperCAmelCase , _UpperCAmelCase = parser.parse_known_args()
if not hasattr(__lowercase , "func" ):
parser.print_help()
exit(1 )
_UpperCAmelCase = parse_unknown_args(__lowercase )
# Run
_UpperCAmelCase = args.func(__lowercase , **__lowercase )
service.run()
if __name__ == "__main__":
main()
| 236
| 1
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _A :
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=13 ,SCREAMING_SNAKE_CASE_=[30, 30] ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=32 ,SCREAMING_SNAKE_CASE_=5 ,SCREAMING_SNAKE_CASE_=4 ,SCREAMING_SNAKE_CASE_=37 ,SCREAMING_SNAKE_CASE_="gelu" ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=10 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=8 ,SCREAMING_SNAKE_CASE_=10 ,):
'''simple docstring'''
snake_case : List[str] = parent
snake_case : int = batch_size
snake_case : List[str] = image_size
snake_case : str = patch_size
snake_case : Optional[int] = num_channels
snake_case : List[Any] = is_training
snake_case : List[str] = use_labels
snake_case : Tuple = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : int = num_attention_heads
snake_case : Dict = intermediate_size
snake_case : int = hidden_act
snake_case : Dict = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : Union[str, Any] = type_sequence_label_size
snake_case : Any = initializer_range
snake_case : Union[str, Any] = num_labels
snake_case : str = scope
snake_case : Any = n_targets
snake_case : Any = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
snake_case : Tuple = (image_size[1] // patch_size) * (image_size[0] // patch_size)
snake_case : Any = num_patches + 1 + self.num_detection_tokens
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
snake_case : str = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
snake_case : Any = []
for i in range(self.batch_size ):
snake_case : Optional[int] = {}
snake_case : Any = torch.randint(
high=self.num_labels ,size=(self.n_targets,) ,device=SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = torch.rand(self.n_targets ,4 ,device=SCREAMING_SNAKE_CASE_ )
labels.append(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = self.get_config()
return config, pixel_values, labels
def snake_case_ ( self ):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=SCREAMING_SNAKE_CASE_ ,initializer_range=self.initializer_range ,num_detection_tokens=self.num_detection_tokens ,num_labels=self.num_labels ,)
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : str = YolosModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
snake_case : Optional[int] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.expected_seq_len, self.hidden_size) )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Dict = YolosForObjectDetection(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
snake_case : Dict = model(pixel_values=SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape ,(self.batch_size, self.num_detection_tokens, 4) )
snake_case : Union[str, Any] = model(pixel_values=SCREAMING_SNAKE_CASE_ ,labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape ,(self.batch_size, self.num_detection_tokens, 4) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[str] = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case : Any = config_and_inputs
snake_case : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _A ( snake_case , snake_case , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : int = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
__lowerCamelCase : str = (
{'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
)
__lowerCamelCase : List[Any] = False
__lowerCamelCase : str = False
__lowerCamelCase : List[str] = False
__lowerCamelCase : Any = False
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=False ):
'''simple docstring'''
snake_case : Any = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,return_labels=SCREAMING_SNAKE_CASE_ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
snake_case : Any = []
for i in range(self.model_tester.batch_size ):
snake_case : Union[str, Any] = {}
snake_case : Tuple = torch.ones(
size=(self.model_tester.n_targets,) ,device=SCREAMING_SNAKE_CASE_ ,dtype=torch.long )
snake_case : List[str] = torch.ones(
self.model_tester.n_targets ,4 ,device=SCREAMING_SNAKE_CASE_ ,dtype=torch.float )
labels.append(SCREAMING_SNAKE_CASE_ )
snake_case : Dict = labels
return inputs_dict
def snake_case_ ( self ):
'''simple docstring'''
snake_case : int = YolosModelTester(self )
snake_case : Optional[int] = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE_ ,has_text_modality=SCREAMING_SNAKE_CASE_ ,hidden_size=37 )
def snake_case_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case_ ( self ):
'''simple docstring'''
# YOLOS does not use inputs_embeds
pass
def snake_case_ ( self ):
'''simple docstring'''
snake_case , snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : List[Any] = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
snake_case : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ ,nn.Linear ) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case , snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : str = model_class(SCREAMING_SNAKE_CASE_ )
snake_case : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case : List[Any] = [*signature.parameters.keys()]
snake_case : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case , snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : str = True
# in YOLOS, the seq_len is different
snake_case : Dict = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
snake_case : Optional[int] = True
snake_case : int = False
snake_case : Tuple = True
snake_case : int = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
snake_case : List[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
snake_case : int = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case : Tuple = True
snake_case : int = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
snake_case : Optional[int] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
snake_case : Any = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
snake_case : List[Any] = len(SCREAMING_SNAKE_CASE_ )
# Check attention is always last and order is fine
snake_case : Optional[int] = True
snake_case : Optional[Any] = True
snake_case : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
snake_case : List[str] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
snake_case : List[str] = 1
self.assertEqual(out_len + added_hidden_states ,len(SCREAMING_SNAKE_CASE_ ) )
snake_case : Dict = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
def snake_case_ ( self ):
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
snake_case : int = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
snake_case : List[str] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
snake_case : Any = outputs.hidden_states
snake_case : Tuple = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ )
# YOLOS has a different seq_length
snake_case : Optional[Any] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,)
snake_case , snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : Tuple = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*SCREAMING_SNAKE_CASE_ )
@slow
def snake_case_ ( self ):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : Optional[Any] = YolosModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def lowercase ( ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case_ ( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""hustvl/yolos-small""" ) if is_vision_available() else None
@slow
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[str] = YolosForObjectDetection.from_pretrained("""hustvl/yolos-small""" ).to(SCREAMING_SNAKE_CASE_ )
snake_case : Any = self.default_image_processor
snake_case : Tuple = prepare_img()
snake_case : Union[str, Any] = image_processor(images=SCREAMING_SNAKE_CASE_ ,return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
snake_case : Optional[int] = model(inputs.pixel_values )
# verify outputs
snake_case : str = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape ,SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] ,device=SCREAMING_SNAKE_CASE_ ,)
snake_case : Optional[int] = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] ,device=SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,SCREAMING_SNAKE_CASE_ ,atol=1E-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] ,SCREAMING_SNAKE_CASE_ ,atol=1E-4 ) )
# verify postprocessing
snake_case : str = image_processor.post_process_object_detection(
SCREAMING_SNAKE_CASE_ ,threshold=0.3 ,target_sizes=[image.size[::-1]] )[0]
snake_case : List[Any] = torch.tensor([0.99_94, 0.97_90, 0.99_64, 0.99_72, 0.98_61] ).to(SCREAMING_SNAKE_CASE_ )
snake_case : Any = [75, 75, 17, 63, 17]
snake_case : List[str] = torch.tensor([3_35.06_09, 79.38_48, 3_75.42_16, 1_87.24_95] ).to(SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(results["""scores"""] ) ,5 )
self.assertTrue(torch.allclose(results["""scores"""] ,SCREAMING_SNAKE_CASE_ ,atol=1E-4 ) )
self.assertSequenceEqual(results["""labels"""].tolist() ,SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(results["""boxes"""][0, :] ,SCREAMING_SNAKE_CASE_ ) )
| 315
|
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowercase ( __A : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case : Union[str, Any] = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def lowercase ( __A : Tuple , __A : List[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Union[str, Any] = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def lowercase ( __A : str ) -> Tuple:
'''simple docstring'''
snake_case : Union[str, Any] = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", """stage2.cls_token""") )
return token
def lowercase ( ) -> Tuple:
'''simple docstring'''
snake_case : Union[str, Any] = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def lowercase ( __A : int , __A : Union[str, Any] , __A : Dict , __A : Dict ) -> int:
'''simple docstring'''
snake_case : Dict = """imagenet-1k-id2label.json"""
snake_case : Tuple = 1000
snake_case : List[Any] = """huggingface/label-files"""
snake_case : List[str] = num_labels
snake_case : Optional[Any] = json.load(open(cached_download(hf_hub_url(__A , __A , repo_type="""dataset""" ) ) , """r""" ) )
snake_case : str = {int(__A ): v for k, v in idalabel.items()}
snake_case : Union[str, Any] = idalabel
snake_case : Any = {v: k for k, v in idalabel.items()}
snake_case : Optional[Any] = CvtConfig(num_labels=__A , idalabel=__A , labelaid=__A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13":
snake_case : Any = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21":
snake_case : Tuple = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
snake_case : Dict = [2, 2, 20]
snake_case : List[str] = [3, 12, 16]
snake_case : int = [192, 768, 1024]
snake_case : Union[str, Any] = CvtForImageClassification(__A )
snake_case : int = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
snake_case : Union[str, Any] = image_size
snake_case : Dict = torch.load(__A , map_location=torch.device("""cpu""" ) )
snake_case : List[str] = OrderedDict()
snake_case : Optional[Any] = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
snake_case : Optional[int] = list_of_state_dict + cls_token(__A )
snake_case : Dict = list_of_state_dict + embeddings(__A )
for cnt in range(config.depth[idx] ):
snake_case : Any = list_of_state_dict + attention(__A , __A )
snake_case : Tuple = list_of_state_dict + final()
for gg in list_of_state_dict:
print(__A )
for i in range(len(__A ) ):
snake_case : List[str] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__A )
model.save_pretrained(__A )
image_processor.save_pretrained(__A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__lowercase : int = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__lowercase : List[Any] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 315
| 1
|
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase(_lowercase , _lowercase , unittest.TestCase ):
__snake_case: Optional[int] = StableDiffusionDiffEditPipeline
__snake_case: List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
__snake_case: Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
__snake_case: int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__snake_case: Tuple = frozenset([] )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
a__ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=__SCREAMING_SNAKE_CASE , )
a__ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , )
a__ = DDIMInverseScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_zero=__SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
a__ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
a__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
a__ = CLIPTextModel(__SCREAMING_SNAKE_CASE )
a__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
a__ = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) -> str:
"""simple docstring"""
a__ = floats_tensor((1, 1_6, 1_6) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a__ = floats_tensor((1, 2, 4, 1_6, 1_6) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
if str(__SCREAMING_SNAKE_CASE ).startswith('mps' ):
a__ = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a__ = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a__ = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) -> str:
"""simple docstring"""
a__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
a__ = Image.fromarray(np.uinta(__SCREAMING_SNAKE_CASE ) ).convert('RGB' )
if str(__SCREAMING_SNAKE_CASE ).startswith('mps' ):
a__ = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a__ = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a__ = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) -> Optional[Any]:
"""simple docstring"""
a__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
a__ = Image.fromarray(np.uinta(__SCREAMING_SNAKE_CASE ) ).convert('RGB' )
if str(__SCREAMING_SNAKE_CASE ).startswith('mps' ):
a__ = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a__ = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a__ = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def lowercase__ ( self ) -> Any:
"""simple docstring"""
if not hasattr(self.pipeline_class , '_optional_components' ):
return
a__ = self.get_dummy_components()
a__ = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
a__ = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
a__ = pipe(**__SCREAMING_SNAKE_CASE )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__SCREAMING_SNAKE_CASE )
a__ = self.pipeline_class.from_pretrained(__SCREAMING_SNAKE_CASE )
pipe_loaded.to(__SCREAMING_SNAKE_CASE )
pipe_loaded.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) is None , f'`{optional_component}` did not stay set to None after loading.' , )
a__ = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
a__ = pipe_loaded(**__SCREAMING_SNAKE_CASE )[0]
a__ = np.abs(output - output_loaded ).max()
self.assertLess(__SCREAMING_SNAKE_CASE , 1e-4 )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
a__ = 'cpu'
a__ = self.get_dummy_components()
a__ = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a__ = self.get_dummy_mask_inputs(__SCREAMING_SNAKE_CASE )
a__ = pipe.generate_mask(**__SCREAMING_SNAKE_CASE )
a__ = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 1_6, 1_6) )
a__ = np.array([0] * 9 )
a__ = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
a__ = 'cpu'
a__ = self.get_dummy_components()
a__ = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a__ = self.get_dummy_inversion_inputs(__SCREAMING_SNAKE_CASE )
a__ = pipe.invert(**__SCREAMING_SNAKE_CASE ).images
a__ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 3_2, 3_2, 3) )
a__ = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
a__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1e-3 )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
a__ = 'cpu'
a__ = self.get_dummy_components()
a__ = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'}
a__ = DPMSolverMultistepScheduler(**__SCREAMING_SNAKE_CASE )
a__ = DPMSolverMultistepInverseScheduler(**__SCREAMING_SNAKE_CASE )
a__ = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a__ = self.get_dummy_inversion_inputs(__SCREAMING_SNAKE_CASE )
a__ = pipe.invert(**__SCREAMING_SNAKE_CASE ).images
a__ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 3_2, 3_2, 3) )
a__ = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
a__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1e-3 )
@require_torch_gpu
@slow
class lowercase(unittest.TestCase ):
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def lowercase__ ( cls ) -> Optional[Any]:
"""simple docstring"""
a__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
a__ = raw_image.convert('RGB' ).resize((7_6_8, 7_6_8) )
a__ = raw_image
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
a__ = torch.manual_seed(0 )
a__ = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=__SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa )
a__ = DDIMScheduler.from_config(pipe.scheduler.config )
a__ = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a__ = 'a bowl of fruit'
a__ = 'a bowl of pears'
a__ = pipe.generate_mask(
image=self.raw_image , source_prompt=__SCREAMING_SNAKE_CASE , target_prompt=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , )
a__ = pipe.invert(
prompt=__SCREAMING_SNAKE_CASE , image=self.raw_image , inpaint_strength=0.7 , generator=__SCREAMING_SNAKE_CASE ).latents
a__ = pipe(
prompt=__SCREAMING_SNAKE_CASE , mask_image=__SCREAMING_SNAKE_CASE , image_latents=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
a__ = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((7_6_8, 7_6_8) ) )
/ 2_5_5
)
assert np.abs((expected_image - image).max() ) < 5e-1
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
a__ = torch.manual_seed(0 )
a__ = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=__SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa )
a__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
a__ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
a__ = 'a bowl of fruit'
a__ = 'a bowl of pears'
a__ = pipe.generate_mask(
image=self.raw_image , source_prompt=__SCREAMING_SNAKE_CASE , target_prompt=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , )
a__ = pipe.invert(
prompt=__SCREAMING_SNAKE_CASE , image=self.raw_image , inpaint_strength=0.7 , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2_5 , ).latents
a__ = pipe(
prompt=__SCREAMING_SNAKE_CASE , mask_image=__SCREAMING_SNAKE_CASE , image_latents=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , inpaint_strength=0.7 , num_inference_steps=2_5 , output_type='numpy' , ).images[0]
a__ = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((7_6_8, 7_6_8) ) )
/ 2_5_5
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 273
|
"""simple docstring"""
import operator
def __magic_name__ ( UpperCamelCase : list , UpperCamelCase : bool = False , UpperCamelCase : list | None = None ) -> list:
a__ = operator.lt if reverse else operator.gt
a__ = solution or []
if not arr:
return solution
a__ = [arr.pop(0 )]
for i, item in enumerate(UpperCamelCase ):
if _operator(UpperCamelCase , sublist[-1] ):
sublist.append(UpperCamelCase )
arr.pop(UpperCamelCase )
# merging sublist into solution list
if not solution:
solution.extend(UpperCamelCase )
else:
while sublist:
a__ = sublist.pop(0 )
for i, xx in enumerate(UpperCamelCase ):
if not _operator(UpperCamelCase , UpperCamelCase ):
solution.insert(UpperCamelCase , UpperCamelCase )
break
else:
solution.append(UpperCamelCase )
strand_sort(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 273
| 1
|
'''simple docstring'''
_A : Optional[int] ={
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
| 631
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Optional[Any] =logging.get_logger(__name__)
_A : Optional[int] ={
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class _lowercase ( _lowercase ):
a = """rwkv"""
a = {"""max_position_embeddings""": """context_length"""}
def __init__( self: Tuple , UpperCamelCase__: Optional[Any]=50_277 , UpperCamelCase__: Union[str, Any]=1_024 , UpperCamelCase__: Tuple=4_096 , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Dict=None , UpperCamelCase__: Dict=None , UpperCamelCase__: int=1e-5 , UpperCamelCase__: Any=0 , UpperCamelCase__: str=0 , UpperCamelCase__: Union[str, Any]=6 , UpperCamelCase__: Optional[int]=False , UpperCamelCase__: Dict=True , **UpperCamelCase__: Dict , ):
lowerCamelCase__ : Dict = vocab_size
lowerCamelCase__ : Optional[Any] = context_length
lowerCamelCase__ : Optional[Any] = hidden_size
lowerCamelCase__ : Any = num_hidden_layers
lowerCamelCase__ : int = attention_hidden_size if attention_hidden_size is not None else hidden_size
lowerCamelCase__ : Union[str, Any] = intermediate_size if intermediate_size is not None else 4 * hidden_size
lowerCamelCase__ : List[str] = layer_norm_epsilon
lowerCamelCase__ : int = rescale_every
lowerCamelCase__ : Optional[int] = use_cache
lowerCamelCase__ : Dict = bos_token_id
lowerCamelCase__ : Any = eos_token_id
super().__init__(
tie_word_embeddings=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
| 631
| 1
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _A ( unittest.TestCase ):
'''simple docstring'''
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
snake_case : List[str] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = -1
snake_case : Union[str, Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = model.generate(SCREAMING_SNAKE_CASE_ ,max_new_tokens=10 ,do_sample=SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
snake_case : Any = TextStreamer(SCREAMING_SNAKE_CASE_ )
model.generate(SCREAMING_SNAKE_CASE_ ,max_new_tokens=10 ,do_sample=SCREAMING_SNAKE_CASE_ ,streamer=SCREAMING_SNAKE_CASE_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
snake_case : Any = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Union[str, Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
snake_case : Dict = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = -1
snake_case : Optional[int] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = model.generate(SCREAMING_SNAKE_CASE_ ,max_new_tokens=10 ,do_sample=SCREAMING_SNAKE_CASE_ )
snake_case : Any = tokenizer.decode(greedy_ids[0] )
snake_case : List[str] = TextIteratorStreamer(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
snake_case : List[Any] = Thread(target=model.generate ,kwargs=SCREAMING_SNAKE_CASE_ )
thread.start()
snake_case : Tuple = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(SCREAMING_SNAKE_CASE_ )
snake_case : Any = -1
snake_case : Dict = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = model.generate(SCREAMING_SNAKE_CASE_ ,max_new_tokens=10 ,do_sample=SCREAMING_SNAKE_CASE_ )
snake_case : str = greedy_ids[:, input_ids.shape[1] :]
snake_case : Dict = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
snake_case : str = TextStreamer(SCREAMING_SNAKE_CASE_ ,skip_prompt=SCREAMING_SNAKE_CASE_ )
model.generate(SCREAMING_SNAKE_CASE_ ,max_new_tokens=10 ,do_sample=SCREAMING_SNAKE_CASE_ ,streamer=SCREAMING_SNAKE_CASE_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
snake_case : str = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
snake_case : Dict = AutoTokenizer.from_pretrained("""distilgpt2""" )
snake_case : Tuple = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = -1
snake_case : Any = torch.ones((1, 5) ,device=SCREAMING_SNAKE_CASE_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
snake_case : List[Any] = TextStreamer(SCREAMING_SNAKE_CASE_ ,skip_special_tokens=SCREAMING_SNAKE_CASE_ )
model.generate(SCREAMING_SNAKE_CASE_ ,max_new_tokens=1 ,do_sample=SCREAMING_SNAKE_CASE_ ,streamer=SCREAMING_SNAKE_CASE_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
snake_case : Dict = cs.out[:-1] # Remove the final "\n"
snake_case : Tuple = tokenizer(SCREAMING_SNAKE_CASE_ ,return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = -1
snake_case : int = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = TextIteratorStreamer(SCREAMING_SNAKE_CASE_ ,timeout=0.0_01 )
snake_case : Optional[int] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
snake_case : List[str] = Thread(target=model.generate ,kwargs=SCREAMING_SNAKE_CASE_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
snake_case : Tuple = """"""
for new_text in streamer:
streamer_text += new_text
| 36
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def lowercase ( __A : Dict , __A : Union[str, Any] , __A : List[str] ) -> Any:
'''simple docstring'''
snake_case : Tuple = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
snake_case : Optional[Any] = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
snake_case : Optional[int] = f"""{src_lang}-{tgt_lang}"""
snake_case : Any = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
os.makedirs(__A , exist_ok=__A )
snake_case : Union[str, Any] = os.path.join(__A , """README.md""" )
print(f"""Generating {path}""" )
with open(__A , """w""" , encoding="""utf-8""" ) as f:
f.write(__A )
# make sure we are under the root of the project
__lowercase : int = Path(__file__).resolve().parent.parent.parent
__lowercase : List[str] = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__lowercase , __lowercase , __lowercase : List[str] = model_name.split('''-''')
__lowercase : str = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 36
| 1
|
"""simple docstring"""
import random
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase = False ):
"""simple docstring"""
_UpperCAmelCase = {i: [] for i in range(lowercase )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(lowercase )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(lowercase ):
for j in range(i + 1 ,lowercase ):
if random.random() < probability:
graph[i].append(lowercase )
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(lowercase )
return graph
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return {
i: [j for j in range(lowercase ) if i != j] for i in range(lowercase )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 275
|
"""simple docstring"""
import random
def __UpperCAmelCase ( lowercase ,lowercase ,lowercase = False ):
"""simple docstring"""
_UpperCAmelCase = {i: [] for i in range(lowercase )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(lowercase )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(lowercase ):
for j in range(i + 1 ,lowercase ):
if random.random() < probability:
graph[i].append(lowercase )
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(lowercase )
return graph
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return {
i: [j for j in range(lowercase ) if i != j] for i in range(lowercase )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 275
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
lowerCamelCase_ : Tuple = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__UpperCamelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__UpperCamelCase : bool = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether tp freeze the encoder."""} )
__UpperCamelCase : bool = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether to freeze the embeddings."""} )
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : str = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
__UpperCamelCase : Optional[str] = field(
default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , )
__UpperCamelCase : Optional[int] = field(
default=1_024 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=128 , metadata={
"""help""": (
"""The maximum total sequence length for target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=142 , metadata={
"""help""": (
"""The maximum total sequence length for validation target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded. """
"""This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """
"""during ``evaluate`` and ``predict``."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=142 , metadata={
"""help""": (
"""The maximum total sequence length for test target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__UpperCamelCase : Optional[int] = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} )
__UpperCamelCase : Optional[int] = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} )
__UpperCamelCase : Optional[int] = field(default=-1 , metadata={"""help""": """# test examples. -1 means use all."""} )
__UpperCamelCase : Optional[str] = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Source language id for translation."""} )
__UpperCamelCase : Optional[str] = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Target language id for translation."""} )
__UpperCamelCase : Optional[int] = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """# num_beams to use for evaluation."""} )
__UpperCamelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> str:
logger.info(F'''***** {split} metrics *****''' )
for key in sorted(metrics.keys() ):
logger.info(F''' {key} = {metrics[key]}''' )
save_json(lowerCamelCase , os.path.join(lowerCamelCase , F'''{split}_results.json''' ) )
def A__ ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase_: List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase_: List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase_: Optional[int] = parser.parse_args_into_dataclasses()
check_output_dir(lowerCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase_: List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase_: Tuple = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
assert hasattr(lowerCamelCase , lowerCamelCase ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(lowerCamelCase , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase ) )
UpperCamelCase_: Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase_: Any = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=lowerCamelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(lowerCamelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
UpperCamelCase_: Dict = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(lowerCamelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(lowerCamelCase , lowerCamelCase ):
UpperCamelCase_: List[str] = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
UpperCamelCase_: List[Any] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(lowerCamelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
UpperCamelCase_: Optional[Any] = SeqaSeqDataset
# Get datasets
UpperCamelCase_: List[Any] = (
dataset_class(
lowerCamelCase , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_train
else None
)
UpperCamelCase_: Optional[int] = (
dataset_class(
lowerCamelCase , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
UpperCamelCase_: Optional[Any] = (
dataset_class(
lowerCamelCase , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
UpperCamelCase_: Tuple = (
build_compute_metrics_fn(data_args.task , lowerCamelCase ) if training_args.predict_with_generate else None
)
UpperCamelCase_: Optional[int] = SeqaSeqTrainer(
model=lowerCamelCase , args=lowerCamelCase , data_args=lowerCamelCase , train_dataset=lowerCamelCase , eval_dataset=lowerCamelCase , data_collator=SeqaSeqDataCollator(
lowerCamelCase , lowerCamelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowerCamelCase , tokenizer=lowerCamelCase , )
UpperCamelCase_: Optional[Any] = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
UpperCamelCase_: Optional[Any] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
UpperCamelCase_: Dict = train_result.metrics
UpperCamelCase_: Optional[int] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , lowerCamelCase , training_args.output_dir )
all_metrics.update(lowerCamelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCamelCase_: List[str] = trainer.evaluate(metric_key_prefix="""val""" )
UpperCamelCase_: int = data_args.n_val
UpperCamelCase_: Any = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , lowerCamelCase , training_args.output_dir )
all_metrics.update(lowerCamelCase )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
UpperCamelCase_: List[Any] = trainer.predict(test_dataset=lowerCamelCase , metric_key_prefix="""test""" )
UpperCamelCase_: Union[str, Any] = test_output.metrics
UpperCamelCase_: List[str] = data_args.n_test
if trainer.is_world_process_zero():
UpperCamelCase_: Dict = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , lowerCamelCase , training_args.output_dir )
all_metrics.update(lowerCamelCase )
if training_args.predict_with_generate:
UpperCamelCase_: str = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase )
UpperCamelCase_: Any = lmap(str.strip , lowerCamelCase )
write_txt_file(lowerCamelCase , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(lowerCamelCase , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def A__ ( lowerCamelCase ) -> str:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 548
|
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : Dict = sorted(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , key=lambda SCREAMING_SNAKE_CASE : x[0] / x[1] , reverse=SCREAMING_SNAKE_CASE )
UpperCamelCase , UpperCamelCase : int = [i[0] for i in r], [i[1] for i in r]
UpperCamelCase : Optional[Any] = list(accumulate(SCREAMING_SNAKE_CASE ) )
UpperCamelCase : Optional[Any] = bisect(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102
| 0
|
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
snake_case__ = StableDiffusionDiffEditPipeline
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
snake_case__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
snake_case__ = frozenset([] )
def __UpperCamelCase ( self : int) -> int:
"""simple docstring"""
torch.manual_seed(0)
_lowerCAmelCase:int = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') ,cross_attention_dim=32 ,attention_head_dim=(2, 4) ,use_linear_projection=a__ ,)
_lowerCAmelCase:Tuple = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule='''scaled_linear''' ,clip_sample=a__ ,set_alpha_to_one=a__ ,)
_lowerCAmelCase:Any = DDIMInverseScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule='''scaled_linear''' ,clip_sample=a__ ,set_alpha_to_zero=a__ ,)
torch.manual_seed(0)
_lowerCAmelCase:Dict = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,sample_size=128 ,)
torch.manual_seed(0)
_lowerCAmelCase:Optional[int] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act='''gelu''' ,projection_dim=512 ,)
_lowerCAmelCase:Union[str, Any] = CLIPTextModel(a__)
_lowerCAmelCase:str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
_lowerCAmelCase:int = {
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __UpperCamelCase ( self : Any ,a__ : Dict ,a__ : int=0) -> Any:
"""simple docstring"""
_lowerCAmelCase:List[Any] = floats_tensor((1, 16, 16) ,rng=random.Random(a__)).to(a__)
_lowerCAmelCase:List[Any] = floats_tensor((1, 2, 4, 16, 16) ,rng=random.Random(a__)).to(a__)
if str(a__).startswith('''mps'''):
_lowerCAmelCase:List[str] = torch.manual_seed(a__)
else:
_lowerCAmelCase:Optional[Any] = torch.Generator(device=a__).manual_seed(a__)
_lowerCAmelCase:str = {
'''prompt''': '''a dog and a newt''',
'''mask_image''': mask,
'''image_latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __UpperCamelCase ( self : Dict ,a__ : Union[str, Any] ,a__ : Any=0) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase:List[str] = floats_tensor((1, 3, 32, 32) ,rng=random.Random(a__)).to(a__)
_lowerCAmelCase:List[str] = image.cpu().permute(0 ,2 ,3 ,1)[0]
_lowerCAmelCase:Union[str, Any] = Image.fromarray(np.uinta(a__)).convert('''RGB''')
if str(a__).startswith('''mps'''):
_lowerCAmelCase:List[str] = torch.manual_seed(a__)
else:
_lowerCAmelCase:Union[str, Any] = torch.Generator(device=a__).manual_seed(a__)
_lowerCAmelCase:Optional[int] = {
'''image''': image,
'''source_prompt''': '''a cat and a frog''',
'''target_prompt''': '''a dog and a newt''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''num_maps_per_mask''': 2,
'''mask_encode_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __UpperCamelCase ( self : List[str] ,a__ : Optional[int] ,a__ : str=0) -> List[str]:
"""simple docstring"""
_lowerCAmelCase:Tuple = floats_tensor((1, 3, 32, 32) ,rng=random.Random(a__)).to(a__)
_lowerCAmelCase:List[str] = image.cpu().permute(0 ,2 ,3 ,1)[0]
_lowerCAmelCase:int = Image.fromarray(np.uinta(a__)).convert('''RGB''')
if str(a__).startswith('''mps'''):
_lowerCAmelCase:List[str] = torch.manual_seed(a__)
else:
_lowerCAmelCase:Optional[Any] = torch.Generator(device=a__).manual_seed(a__)
_lowerCAmelCase:Any = {
'''image''': image,
'''prompt''': '''a cat and a frog''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''decode_latents''': True,
'''output_type''': '''numpy''',
}
return inputs
def __UpperCamelCase ( self : Tuple) -> int:
"""simple docstring"""
if not hasattr(self.pipeline_class ,'''_optional_components'''):
return
_lowerCAmelCase:Any = self.get_dummy_components()
_lowerCAmelCase:str = self.pipeline_class(**a__)
pipe.to(a__)
pipe.set_progress_bar_config(disable=a__)
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(a__ ,a__ ,a__)
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
_lowerCAmelCase:List[str] = self.get_dummy_inputs(a__)
_lowerCAmelCase:Dict = pipe(**a__)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a__)
_lowerCAmelCase:int = self.pipeline_class.from_pretrained(a__)
pipe_loaded.to(a__)
pipe_loaded.set_progress_bar_config(disable=a__)
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(a__ ,a__) is None ,F'`{optional_component}` did not stay set to None after loading.' ,)
_lowerCAmelCase:Optional[Any] = self.get_dummy_inputs(a__)
_lowerCAmelCase:str = pipe_loaded(**a__)[0]
_lowerCAmelCase:Union[str, Any] = np.abs(output - output_loaded).max()
self.assertLess(a__ ,1E-4)
def __UpperCamelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase:Optional[int] = '''cpu'''
_lowerCAmelCase:str = self.get_dummy_components()
_lowerCAmelCase:Any = self.pipeline_class(**a__)
pipe.to(a__)
pipe.set_progress_bar_config(disable=a__)
_lowerCAmelCase:str = self.get_dummy_mask_inputs(a__)
_lowerCAmelCase:List[str] = pipe.generate_mask(**a__)
_lowerCAmelCase:Union[str, Any] = mask[0, -3:, -3:]
self.assertEqual(mask.shape ,(1, 16, 16))
_lowerCAmelCase:Optional[Any] = np.array([0] * 9)
_lowerCAmelCase:Optional[Any] = np.abs(mask_slice.flatten() - expected_slice).max()
self.assertLessEqual(a__ ,1E-3)
self.assertEqual(mask[0, -3, -4] ,0)
def __UpperCamelCase ( self : Any) -> str:
"""simple docstring"""
_lowerCAmelCase:Optional[int] = '''cpu'''
_lowerCAmelCase:str = self.get_dummy_components()
_lowerCAmelCase:int = self.pipeline_class(**a__)
pipe.to(a__)
pipe.set_progress_bar_config(disable=a__)
_lowerCAmelCase:Dict = self.get_dummy_inversion_inputs(a__)
_lowerCAmelCase:Optional[Any] = pipe.invert(**a__).images
_lowerCAmelCase:List[str] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape ,(2, 32, 32, 3))
_lowerCAmelCase:Dict = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] ,)
_lowerCAmelCase:str = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(a__ ,1E-3)
def __UpperCamelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=5E-3)
def __UpperCamelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
_lowerCAmelCase:Tuple = '''cpu'''
_lowerCAmelCase:Union[str, Any] = self.get_dummy_components()
_lowerCAmelCase:List[str] = {'''beta_start''': 0.00085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''}
_lowerCAmelCase:Optional[int] = DPMSolverMultistepScheduler(**a__)
_lowerCAmelCase:List[str] = DPMSolverMultistepInverseScheduler(**a__)
_lowerCAmelCase:List[str] = self.pipeline_class(**a__)
pipe.to(a__)
pipe.set_progress_bar_config(disable=a__)
_lowerCAmelCase:str = self.get_dummy_inversion_inputs(a__)
_lowerCAmelCase:Tuple = pipe.invert(**a__).images
_lowerCAmelCase:Optional[int] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape ,(2, 32, 32, 3))
_lowerCAmelCase:List[Any] = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] ,)
_lowerCAmelCase:List[Any] = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(a__ ,1E-3)
@require_torch_gpu
@slow
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : int) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def __UpperCamelCase ( cls : Tuple) -> Dict:
"""simple docstring"""
_lowerCAmelCase:Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''')
_lowerCAmelCase:Optional[int] = raw_image.convert('''RGB''').resize((768, 768))
_lowerCAmelCase:Dict = raw_image
def __UpperCamelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase:str = torch.manual_seed(0)
_lowerCAmelCase:List[Any] = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' ,safety_checker=a__ ,torch_dtype=torch.floataa)
_lowerCAmelCase:Union[str, Any] = DDIMScheduler.from_config(pipe.scheduler.config)
_lowerCAmelCase:Tuple = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a__)
_lowerCAmelCase:Any = '''a bowl of fruit'''
_lowerCAmelCase:List[str] = '''a bowl of pears'''
_lowerCAmelCase:List[str] = pipe.generate_mask(
image=self.raw_image ,source_prompt=a__ ,target_prompt=a__ ,generator=a__ ,)
_lowerCAmelCase:str = pipe.invert(
prompt=a__ ,image=self.raw_image ,inpaint_strength=0.7 ,generator=a__).latents
_lowerCAmelCase:Optional[int] = pipe(
prompt=a__ ,mask_image=a__ ,image_latents=a__ ,generator=a__ ,negative_prompt=a__ ,inpaint_strength=0.7 ,output_type='''numpy''' ,).images[0]
_lowerCAmelCase:List[str] = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''').resize((768, 768)))
/ 255
)
assert np.abs((expected_image - image).max()) < 5E-1
def __UpperCamelCase ( self : Dict) -> int:
"""simple docstring"""
_lowerCAmelCase:int = torch.manual_seed(0)
_lowerCAmelCase:List[str] = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' ,safety_checker=a__ ,torch_dtype=torch.floataa)
_lowerCAmelCase:List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowerCAmelCase:Dict = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a__)
_lowerCAmelCase:Any = '''a bowl of fruit'''
_lowerCAmelCase:Union[str, Any] = '''a bowl of pears'''
_lowerCAmelCase:Optional[int] = pipe.generate_mask(
image=self.raw_image ,source_prompt=a__ ,target_prompt=a__ ,generator=a__ ,)
_lowerCAmelCase:Dict = pipe.invert(
prompt=a__ ,image=self.raw_image ,inpaint_strength=0.7 ,generator=a__ ,num_inference_steps=25 ,).latents
_lowerCAmelCase:Optional[int] = pipe(
prompt=a__ ,mask_image=a__ ,image_latents=a__ ,generator=a__ ,negative_prompt=a__ ,inpaint_strength=0.7 ,num_inference_steps=25 ,output_type='''numpy''' ,).images[0]
_lowerCAmelCase:Optional[Any] = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''').resize((768, 768)))
/ 255
)
assert np.abs((expected_image - image).max()) < 5E-1
| 439
|
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class a__ :
def __init__( self : List[str] ,a__ : List[Any] ,a__ : List[str]=2 ,a__ : Optional[Any]=32 ,a__ : int=16 ,a__ : Dict=3 ,a__ : Optional[int]=True ,a__ : int=True ,a__ : Optional[Any]=32 ,a__ : str=4 ,a__ : Tuple=[0, 1, 2, 3] ,a__ : Any=4 ,a__ : int=37 ,a__ : int="gelu" ,a__ : Optional[int]=0.1 ,a__ : List[Any]=0.1 ,a__ : Optional[Any]=0.02 ,a__ : str=3 ,a__ : str=[1, 384, 24, 24] ,a__ : Optional[Any]=True ,a__ : Tuple=None ,) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] = parent
_lowerCAmelCase:Union[str, Any] = batch_size
_lowerCAmelCase:List[Any] = image_size
_lowerCAmelCase:int = patch_size
_lowerCAmelCase:Optional[int] = num_channels
_lowerCAmelCase:List[str] = is_training
_lowerCAmelCase:int = use_labels
_lowerCAmelCase:Any = hidden_size
_lowerCAmelCase:Any = num_hidden_layers
_lowerCAmelCase:List[Any] = backbone_out_indices
_lowerCAmelCase:int = num_attention_heads
_lowerCAmelCase:Tuple = intermediate_size
_lowerCAmelCase:int = hidden_act
_lowerCAmelCase:Optional[int] = hidden_dropout_prob
_lowerCAmelCase:Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase:Any = initializer_range
_lowerCAmelCase:int = num_labels
_lowerCAmelCase:Optional[int] = backbone_featmap_shape
_lowerCAmelCase:List[str] = scope
_lowerCAmelCase:Union[str, Any] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase:Any = (image_size // patch_size) ** 2
_lowerCAmelCase:Tuple = num_patches + 1
def __UpperCamelCase ( self : Tuple) -> Dict:
"""simple docstring"""
_lowerCAmelCase:List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowerCAmelCase:Optional[int] = None
if self.use_labels:
_lowerCAmelCase:Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels)
_lowerCAmelCase:Optional[Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
_lowerCAmelCase:Optional[int] = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,backbone_out_indices=self.backbone_out_indices ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=a__ ,initializer_range=self.initializer_range ,is_hybrid=self.is_hybrid ,backbone_config=a__ ,backbone_featmap_shape=self.backbone_featmap_shape ,)
def __UpperCamelCase ( self : int ,a__ : Tuple ,a__ : Tuple ,a__ : Tuple) -> Dict:
"""simple docstring"""
_lowerCAmelCase:Dict = DPTModel(config=a__)
model.to(a__)
model.eval()
_lowerCAmelCase:Optional[Any] = model(a__)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size))
def __UpperCamelCase ( self : str ,a__ : Optional[int] ,a__ : Union[str, Any] ,a__ : int) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase:Tuple = self.num_labels
_lowerCAmelCase:List[Any] = DPTForDepthEstimation(a__)
model.to(a__)
model.eval()
_lowerCAmelCase:int = model(a__)
self.parent.assertEqual(result.predicted_depth.shape ,(self.batch_size, self.image_size, self.image_size))
def __UpperCamelCase ( self : Union[str, Any] ,a__ : Dict ,a__ : List[Any] ,a__ : str) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase:Dict = self.num_labels
_lowerCAmelCase:str = DPTForSemanticSegmentation(a__)
model.to(a__)
model.eval()
_lowerCAmelCase:int = model(a__ ,labels=a__)
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size, self.image_size))
def __UpperCamelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
_lowerCAmelCase:List[str] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase:Any = config_and_inputs
_lowerCAmelCase:Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
snake_case__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
snake_case__ = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
snake_case__ = False
snake_case__ = False
snake_case__ = False
def __UpperCamelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
_lowerCAmelCase:List[str] = DPTModelTester(self)
_lowerCAmelCase:List[str] = ConfigTester(self ,config_class=a__ ,has_text_modality=a__ ,hidden_size=37)
def __UpperCamelCase ( self : str) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''')
def __UpperCamelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase:Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase:Union[str, Any] = model_class(a__)
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module))
_lowerCAmelCase:Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ ,nn.Linear))
def __UpperCamelCase ( self : Dict) -> int:
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase:Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase:int = model_class(a__)
_lowerCAmelCase:Union[str, Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase:Any = [*signature.parameters.keys()]
_lowerCAmelCase:Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,a__)
def __UpperCamelCase ( self : Tuple) -> int:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__)
def __UpperCamelCase ( self : Dict) -> int:
"""simple docstring"""
_lowerCAmelCase:Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*a__)
def __UpperCamelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase:Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a__)
def __UpperCamelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_lowerCAmelCase , _lowerCAmelCase:Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase:Union[str, Any] = True
if model_class in get_values(a__):
continue
_lowerCAmelCase:Union[str, Any] = model_class(a__)
model.to(a__)
model.train()
_lowerCAmelCase:Optional[int] = self._prepare_for_class(a__ ,a__ ,return_labels=a__)
_lowerCAmelCase:Union[str, Any] = model(**a__).loss
loss.backward()
def __UpperCamelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_lowerCAmelCase , _lowerCAmelCase:Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase:str = False
_lowerCAmelCase:str = True
if model_class in get_values(a__) or not model_class.supports_gradient_checkpointing:
continue
_lowerCAmelCase:Dict = model_class(a__)
model.to(a__)
model.gradient_checkpointing_enable()
model.train()
_lowerCAmelCase:Any = self._prepare_for_class(a__ ,a__ ,return_labels=a__)
_lowerCAmelCase:Tuple = model(**a__).loss
loss.backward()
def __UpperCamelCase ( self : Any) -> int:
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase:Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase:Dict = _config_zero_init(a__)
for model_class in self.all_model_classes:
_lowerCAmelCase:Tuple = model_class(config=a__)
# Skip the check for the backbone
_lowerCAmelCase:Tuple = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_lowerCAmelCase:Tuple = [F'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=F'Parameter {name} of model {model_class} seems not properly initialized' ,)
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def __UpperCamelCase ( self : Dict) -> Any:
"""simple docstring"""
pass
@slow
def __UpperCamelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_lowerCAmelCase:str = DPTModel.from_pretrained(a__)
self.assertIsNotNone(a__)
def __UpperCamelCase ( self : int) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase:List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase:Any = '''add'''
with self.assertRaises(a__):
_lowerCAmelCase:Any = DPTForDepthEstimation(a__)
def UpperCAmelCase ( ):
_lowerCAmelCase:Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase:List[str] = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''')
_lowerCAmelCase:Optional[int] = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''').to(a__)
_lowerCAmelCase:Tuple = prepare_img()
_lowerCAmelCase:List[str] = image_processor(images=a__ ,return_tensors='''pt''').to(a__)
# forward pass
with torch.no_grad():
_lowerCAmelCase:Dict = model(**a__)
_lowerCAmelCase:List[str] = outputs.predicted_depth
# verify the predicted depth
_lowerCAmelCase:str = torch.Size((1, 384, 384))
self.assertEqual(predicted_depth.shape ,a__)
_lowerCAmelCase:Any = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]).to(a__)
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 ,a__ ,atol=1E-4))
| 439
| 1
|
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case ( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : int = jnp.ones((batch_size, length) ) / length
return scores
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = None
SCREAMING_SNAKE_CASE_ : int = 20
SCREAMING_SNAKE_CASE_ : int = self._get_uniform_logits(batch_size=2 ,length=snake_case__ )
# tweak scores to not be uniform anymore
SCREAMING_SNAKE_CASE_ : Any = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
SCREAMING_SNAKE_CASE_ : Dict = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
SCREAMING_SNAKE_CASE_ : int = jax.nn.softmax(snake_case__ ,axis=-1 )
SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5 )
SCREAMING_SNAKE_CASE_ : Dict = FlaxTemperatureLogitsWarper(temperature=1.3 )
SCREAMING_SNAKE_CASE_ : Any = jax.nn.softmax(temp_dist_warper_sharper(snake_case__ ,scores.copy() ,cur_len=snake_case__ ) ,axis=-1 )
SCREAMING_SNAKE_CASE_ : Optional[int] = jax.nn.softmax(temp_dist_warper_smoother(snake_case__ ,scores.copy() ,cur_len=snake_case__ ) ,axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : Optional[Any] = 10
SCREAMING_SNAKE_CASE_ : int = 2
# create ramp distribution
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.broadcast_to(np.arange(snake_case__ )[None, :] ,(batch_size, vocab_size) ).copy()
SCREAMING_SNAKE_CASE_ : Optional[int] = ramp_logits[1:, : vocab_size // 2] + vocab_size
SCREAMING_SNAKE_CASE_ : List[str] = FlaxTopKLogitsWarper(3 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = top_k_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] )
# check special case
SCREAMING_SNAKE_CASE_ : int = 5
SCREAMING_SNAKE_CASE_ : Any = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.broadcast_to(np.arange(snake_case__ )[None, :] ,(batch_size, length) ).copy()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = top_k_warp_safety_check(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : str = 10
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
SCREAMING_SNAKE_CASE_ : Any = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
SCREAMING_SNAKE_CASE_ : Dict = FlaxTopPLogitsWarper(0.8 )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.exp(top_p_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
SCREAMING_SNAKE_CASE_ : str = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(snake_case__ ,snake_case__ ,atol=1E-3 ) )
# check edge cases with negative and extreme logits
SCREAMING_SNAKE_CASE_ : List[Any] = np.broadcast_to(np.arange(snake_case__ )[None, :] ,(batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
SCREAMING_SNAKE_CASE_ : Any = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
SCREAMING_SNAKE_CASE_ : List[Any] = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = top_p_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = 20
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 4
SCREAMING_SNAKE_CASE_ : str = 0
SCREAMING_SNAKE_CASE_ : str = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=snake_case__ )
# check that min length is applied at length 5
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor((batch_size, 20) ,vocab_size=20 )
SCREAMING_SNAKE_CASE_ : Optional[int] = 5
SCREAMING_SNAKE_CASE_ : List[str] = self._get_uniform_logits(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = min_dist_processor(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float('inf' )] )
# check that min length is not applied anymore at length 15
SCREAMING_SNAKE_CASE_ : Optional[int] = self._get_uniform_logits(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : int = 15
SCREAMING_SNAKE_CASE_ : Any = min_dist_processor(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
self.assertFalse(jnp.isinf(snake_case__ ).any() )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = 20
SCREAMING_SNAKE_CASE_ : List[Any] = 4
SCREAMING_SNAKE_CASE_ : Dict = 0
SCREAMING_SNAKE_CASE_ : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case__ )
# check that all scores are -inf except the bos_token_id score
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor((batch_size, 1) ,vocab_size=20 )
SCREAMING_SNAKE_CASE_ : Optional[int] = 1
SCREAMING_SNAKE_CASE_ : str = self._get_uniform_logits(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = logits_processor(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
SCREAMING_SNAKE_CASE_ : Dict = 3
SCREAMING_SNAKE_CASE_ : Dict = self._get_uniform_logits(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logits_processor(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
self.assertFalse(jnp.isinf(snake_case__ ).any() )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = 20
SCREAMING_SNAKE_CASE_ : Any = 4
SCREAMING_SNAKE_CASE_ : int = 0
SCREAMING_SNAKE_CASE_ : Tuple = 5
SCREAMING_SNAKE_CASE_ : List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case__ ,eos_token_id=snake_case__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((batch_size, 4) ,vocab_size=20 )
SCREAMING_SNAKE_CASE_ : List[Any] = 4
SCREAMING_SNAKE_CASE_ : Dict = self._get_uniform_logits(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = logits_processor(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
SCREAMING_SNAKE_CASE_ : List[Any] = 3
SCREAMING_SNAKE_CASE_ : int = self._get_uniform_logits(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = logits_processor(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
self.assertFalse(jnp.isinf(snake_case__ ).any() )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = 4
SCREAMING_SNAKE_CASE_ : Dict = 10
SCREAMING_SNAKE_CASE_ : int = 15
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 2
SCREAMING_SNAKE_CASE_ : Optional[int] = 1
SCREAMING_SNAKE_CASE_ : Optional[Any] = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_ : Any = ids_tensor((batch_size, sequence_length) ,snake_case__ )
SCREAMING_SNAKE_CASE_ : int = input_ids.copy()
SCREAMING_SNAKE_CASE_ : str = self._get_uniform_logits(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_ : int = FlaxTemperatureLogitsWarper(temperature=0.5 )
SCREAMING_SNAKE_CASE_ : int = FlaxTopKLogitsWarper(3 )
SCREAMING_SNAKE_CASE_ : Dict = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
SCREAMING_SNAKE_CASE_ : Any = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case__ ,eos_token_id=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = 10
# no processor list
SCREAMING_SNAKE_CASE_ : Optional[int] = temp_dist_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = top_k_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = top_p_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : str = min_dist_proc(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = bos_dist_proc(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = eos_dist_proc(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
# with processor list
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
SCREAMING_SNAKE_CASE_ : Any = processor(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
# scores should be equal
self.assertTrue(jnp.allclose(snake_case__ ,snake_case__ ,atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 4
SCREAMING_SNAKE_CASE_ : Optional[Any] = 10
SCREAMING_SNAKE_CASE_ : Dict = 15
SCREAMING_SNAKE_CASE_ : Dict = 2
SCREAMING_SNAKE_CASE_ : Tuple = 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor((batch_size, sequence_length) ,snake_case__ )
SCREAMING_SNAKE_CASE_ : int = input_ids.copy()
SCREAMING_SNAKE_CASE_ : List[Any] = self._get_uniform_logits(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_ : str = FlaxTemperatureLogitsWarper(temperature=0.5 )
SCREAMING_SNAKE_CASE_ : List[str] = FlaxTopKLogitsWarper(3 )
SCREAMING_SNAKE_CASE_ : int = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
SCREAMING_SNAKE_CASE_ : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=snake_case__ )
SCREAMING_SNAKE_CASE_ : str = FlaxForcedEOSTokenLogitsProcessor(max_length=snake_case__ ,eos_token_id=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = 10
# no processor list
def run_no_processor_list(snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : int = temp_dist_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : int = top_k_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = top_p_warp(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = min_dist_proc(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = bos_dist_proc(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = eos_dist_proc(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
return scores
# with processor list
def run_processor_list(snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
SCREAMING_SNAKE_CASE_ : List[str] = processor(snake_case__ ,snake_case__ ,cur_len=snake_case__ )
return scores
SCREAMING_SNAKE_CASE_ : Tuple = jax.jit(snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = jax.jit(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = jitted_run_no_processor_list(snake_case__ ,snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = jitted_run_processor_list(snake_case__ ,snake_case__ ,snake_case__ )
# scores should be equal
self.assertTrue(jnp.allclose(snake_case__ ,snake_case__ ,atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
| 105
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class _lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple ="openai/whisper-base"
SCREAMING_SNAKE_CASE_ : List[str] =(
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
SCREAMING_SNAKE_CASE_ : int ="transcriber"
SCREAMING_SNAKE_CASE_ : Tuple =WhisperProcessor
SCREAMING_SNAKE_CASE_ : List[Any] =WhisperForConditionalGeneration
SCREAMING_SNAKE_CASE_ : Union[str, Any] =["audio"]
SCREAMING_SNAKE_CASE_ : Any =["text"]
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
"""simple docstring"""
return self.pre_processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).input_features
def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
return self.model.generate(inputs=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
return self.pre_processor.batch_decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )[0]
| 282
| 0
|
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def UpperCamelCase__ ( UpperCAmelCase , UpperCAmelCase ) -> str:
"""simple docstring"""
_a : List[Any] = k_size // 2
_a , _a : Dict = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
_a : int = 1 / (2 * pi * sigma) * exp(-(square(UpperCAmelCase ) + square(UpperCAmelCase )) / (2 * square(UpperCAmelCase )) )
return g
def UpperCamelCase__ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
"""simple docstring"""
_a , _a : Optional[Any] = image.shape[0], image.shape[1]
# dst image height and width
_a : List[Any] = height - k_size + 1
_a : Optional[Any] = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
_a : Any = zeros((dst_height * dst_width, k_size * k_size) )
_a : List[str] = 0
for i, j in product(range(UpperCAmelCase ) , range(UpperCAmelCase ) ):
_a : List[Any] = ravel(image[i : i + k_size, j : j + k_size] )
_a : List[str] = window
row += 1
# turn the kernel into shape(k*k, 1)
_a : List[str] = gen_gaussian_kernel(UpperCAmelCase , UpperCAmelCase )
_a : Dict = ravel(UpperCAmelCase )
# reshape and get the dst image
_a : str = dot(UpperCAmelCase , UpperCAmelCase ).reshape(UpperCAmelCase , UpperCAmelCase ).astype(UpperCAmelCase )
return dst
if __name__ == "__main__":
# read original image
__lowerCamelCase = imread(R'../image_data/lena.jpg')
# turn image in gray scale value
__lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
__lowerCamelCase = gaussian_filter(gray, 3, sigma=1)
__lowerCamelCase = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('gaussian filter with 3x3 mask', gaussianaxa)
imshow('gaussian filter with 5x5 mask', gaussianaxa)
waitKey()
| 307
|
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase__ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple:
"""simple docstring"""
_a : Any = LxmertConfig.from_json_file(UpperCAmelCase )
print(F'Building PyTorch model from configuration: {config}' )
_a : List[Any] = LxmertForPreTraining(UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , UpperCAmelCase )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 307
| 1
|
def _lowerCamelCase ( lowerCamelCase_: int , lowerCamelCase_: int ):
'''simple docstring'''
return x if y == 0 else greatest_common_divisor(lowercase__ , x % y )
def _lowerCamelCase ( lowerCamelCase_: int , lowerCamelCase_: int ):
'''simple docstring'''
return (x * y) // greatest_common_divisor(lowercase__ , lowercase__ )
def _lowerCamelCase ( lowerCamelCase_: int = 20 ):
'''simple docstring'''
A : int = 1
for i in range(1 , n + 1 ):
A : Optional[int] = lcm(lowercase__ , lowercase__ )
return g
if __name__ == "__main__":
print(F'''{solution() = }''')
| 256
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class __snake_case :
def __init__( self: Any , A_: str , A_: str=13 , A_: Dict=7 , A_: Optional[int]=True , A_: int=True , A_: List[str]=True , A_: Any=True , A_: List[Any]=99 , A_: Tuple=32 , A_: Union[str, Any]=2 , A_: Dict=4 , A_: Tuple=37 , A_: Dict="gelu" , A_: List[Any]=0.1 , A_: int=0.1 , A_: Union[str, Any]=5_12 , A_: List[Any]=16 , A_: Optional[Any]=2 , A_: int=0.02 , A_: List[str]=3 , A_: Optional[int]=4 , A_: List[str]=None , ):
__lowerCamelCase = parent
__lowerCamelCase = 13
__lowerCamelCase = 7
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = 99
__lowerCamelCase = 32
__lowerCamelCase = 2
__lowerCamelCase = 4
__lowerCamelCase = 37
__lowerCamelCase = """gelu"""
__lowerCamelCase = 0.1
__lowerCamelCase = 0.1
__lowerCamelCase = 5_12
__lowerCamelCase = 16
__lowerCamelCase = 2
__lowerCamelCase = 0.02
__lowerCamelCase = 3
__lowerCamelCase = 4
__lowerCamelCase = None
def __a ( self: Optional[Any] ):
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self: Union[str, Any] , A_: str , A_: Dict , A_: Union[str, Any] , A_: Optional[int] , A_: Any , A_: Dict , A_: int ):
__lowerCamelCase = TFRoFormerModel(config=A_ )
__lowerCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowerCamelCase = [input_ids, input_mask]
__lowerCamelCase = model(A_ )
__lowerCamelCase = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self: List[str] , A_: int , A_: str , A_: Any , A_: int , A_: str , A_: str , A_: Union[str, Any] ):
__lowerCamelCase = True
__lowerCamelCase = TFRoFormerForCausalLM(config=A_ )
__lowerCamelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowerCamelCase = model(A_ )["""logits"""]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def __a ( self: Any , A_: Optional[Any] , A_: Optional[Any] , A_: Union[str, Any] , A_: str , A_: str , A_: Optional[Any] , A_: int ):
__lowerCamelCase = TFRoFormerForMaskedLM(config=A_ )
__lowerCamelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowerCamelCase = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self: Optional[Any] , A_: Dict , A_: Union[str, Any] , A_: List[Any] , A_: str , A_: int , A_: Optional[Any] , A_: int ):
__lowerCamelCase = self.num_labels
__lowerCamelCase = TFRoFormerForSequenceClassification(config=A_ )
__lowerCamelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowerCamelCase = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self: List[str] , A_: Tuple , A_: List[str] , A_: Union[str, Any] , A_: Optional[Any] , A_: List[str] , A_: Optional[int] , A_: List[Any] ):
__lowerCamelCase = self.num_choices
__lowerCamelCase = TFRoFormerForMultipleChoice(config=A_ )
__lowerCamelCase = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase = tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__lowerCamelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__lowerCamelCase = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self: Union[str, Any] , A_: Dict , A_: List[Any] , A_: List[Any] , A_: Union[str, Any] , A_: str , A_: Union[str, Any] , A_: List[str] ):
__lowerCamelCase = self.num_labels
__lowerCamelCase = TFRoFormerForTokenClassification(config=A_ )
__lowerCamelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowerCamelCase = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self: int , A_: Union[str, Any] , A_: Any , A_: Any , A_: Dict , A_: str , A_: int , A_: Tuple ):
__lowerCamelCase = TFRoFormerForQuestionAnswering(config=A_ )
__lowerCamelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowerCamelCase = model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self: Union[str, Any] ):
__lowerCamelCase = self.prepare_config_and_inputs()
(
(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,(
__lowerCamelCase
) ,
) = config_and_inputs
__lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class __snake_case (lowerCamelCase , lowerCamelCase , unittest.TestCase ):
__a = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
__a = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
__a = False
__a = False
def __a ( self: List[str] , A_: List[Any] , A_: List[str] , A_: str , A_: Union[str, Any] , A_: Dict ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def __a ( self: Tuple ):
__lowerCamelCase = TFRoFormerModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=A_ , hidden_size=37 )
def __a ( self: int ):
self.config_tester.run_common_tests()
def __a ( self: Optional[int] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __a ( self: Union[str, Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def __a ( self: Optional[Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*A_ )
def __a ( self: Tuple ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A_ )
def __a ( self: Any ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A_ )
def __a ( self: int ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A_ )
def __a ( self: int ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def __a ( self: Optional[int] ):
__lowerCamelCase = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" )
self.assertIsNotNone(A_ )
@require_tf
class __snake_case (unittest.TestCase ):
@slow
def __a ( self: Optional[Any] ):
__lowerCamelCase = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
__lowerCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowerCamelCase = model(A_ )[0]
# TODO Replace vocab size
__lowerCamelCase = 5_00_00
__lowerCamelCase = [1, 6, vocab_size]
self.assertEqual(output.shape , A_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__lowerCamelCase = tf.constant(
[
[
[-0.12_053_341, -1.0_264_901, 0.29_221_946],
[-1.5_133_783, 0.197_433, 0.15_190_607],
[-5.0_135_403, -3.900_256, -0.84_038_764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1E-4 )
@require_tf
class __snake_case (unittest.TestCase ):
__a = 1e-4
def __a ( self: List[Any] ):
__lowerCamelCase = tf.constant([[4, 10]] )
__lowerCamelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__lowerCamelCase = emba(input_ids.shape )
__lowerCamelCase = tf.constant(
[[0.0_000, 0.0_000, 0.0_000, 1.0_000, 1.0_000, 1.0_000], [0.8_415, 0.0_464, 0.0_022, 0.5_403, 0.9_989, 1.0_000]] )
tf.debugging.assert_near(A_ , A_ , atol=self.tolerance )
def __a ( self: Optional[Any] ):
__lowerCamelCase = tf.constant(
[
[0.0_000, 0.0_000, 0.0_000, 0.0_000, 0.0_000],
[0.8_415, 0.8_219, 0.8_020, 0.7_819, 0.7_617],
[0.9_093, 0.9_364, 0.9_581, 0.9_749, 0.9_870],
] )
__lowerCamelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12 )
emba([2, 16, 5_12] )
__lowerCamelCase = emba.weight[:3, :5]
tf.debugging.assert_near(A_ , A_ , atol=self.tolerance )
@require_tf
class __snake_case (unittest.TestCase ):
__a = 1e-4
def __a ( self: str ):
# 2,12,16,64
__lowerCamelCase = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00
__lowerCamelCase = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00
__lowerCamelCase = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
__lowerCamelCase = embed_positions([2, 16, 7_68] )[None, None, :, :]
__lowerCamelCase ,__lowerCamelCase = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
A_ , A_ , A_ )
__lowerCamelCase = tf.constant(
[
[0.0_000, 0.0_100, 0.0_200, 0.0_300, 0.0_400, 0.0_500, 0.0_600, 0.0_700],
[-0.2_012, 0.8_897, 0.0_263, 0.9_401, 0.2_074, 0.9_463, 0.3_481, 0.9_343],
[-1.7_057, 0.6_271, -1.2_145, 1.3_897, -0.6_303, 1.7_647, -0.1_173, 1.8_985],
[-2.1_731, -1.6_397, -2.7_358, 0.2_854, -2.1_840, 1.7_183, -1.3_018, 2.4_871],
[0.2_717, -3.6_173, -2.9_206, -2.1_988, -3.6_638, 0.3_858, -2.9_155, 2.2_980],
[3.9_859, -2.1_580, -0.7_984, -4.4_904, -4.1_181, -2.0_252, -4.4_782, 1.1_253],
] )
__lowerCamelCase = tf.constant(
[
[0.0_000, -0.0_100, -0.0_200, -0.0_300, -0.0_400, -0.0_500, -0.0_600, -0.0_700],
[0.2_012, -0.8_897, -0.0_263, -0.9_401, -0.2_074, -0.9_463, -0.3_481, -0.9_343],
[1.7_057, -0.6_271, 1.2_145, -1.3_897, 0.6_303, -1.7_647, 0.1_173, -1.8_985],
[2.1_731, 1.6_397, 2.7_358, -0.2_854, 2.1_840, -1.7_183, 1.3_018, -2.4_871],
[-0.2_717, 3.6_173, 2.9_206, 2.1_988, 3.6_638, -0.3_858, 2.9_155, -2.2_980],
[-3.9_859, 2.1_580, 0.7_984, 4.4_904, 4.1_181, 2.0_252, 4.4_782, -1.1_253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , A_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , A_ , atol=self.tolerance )
| 281
| 0
|
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=512 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def snake_case__ ( self ):
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self ):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
__lowercase = BioGptModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowercase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
__lowercase = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
__lowercase = BioGptForCausalLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowercase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ ):
__lowercase = BioGptModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
# create attention mask
__lowercase = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCAmelCase_ )
__lowercase = self.seq_length // 2
__lowercase = 0
# first forward pass
__lowercase , __lowercase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ).to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowercase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
__lowercase = ids_tensor((1,) , lowerCAmelCase_ ).item() + 1
__lowercase = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
__lowercase = random_other_next_tokens
# append to next input_ids and attn_mask
__lowercase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowercase = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowerCAmelCase_ )] , dim=1 , )
# get two different outputs
__lowercase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )["last_hidden_state"]
__lowercase = model(lowerCAmelCase_ , past_key_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )["last_hidden_state"]
# select random slice
__lowercase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowercase = output_from_no_past[:, -1, random_slice_idx].detach()
__lowercase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) )
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ ):
__lowercase = BioGptModel(config=lowerCAmelCase_ ).to(lowerCAmelCase_ ).eval()
__lowercase = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCAmelCase_ )
# first forward pass
__lowercase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
__lowercase , __lowercase = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
__lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowercase = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
__lowercase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowercase = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
__lowercase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )["last_hidden_state"]
__lowercase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ )[
"last_hidden_state"
]
# select random slice
__lowercase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowercase = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowercase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) )
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ , lowerCAmelCase_=False ):
__lowercase = BioGptForCausalLM(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__lowercase = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def snake_case__ ( self , lowerCAmelCase_ , *lowerCAmelCase_ ):
__lowercase = BioGptModel(lowerCAmelCase_ )
__lowercase = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ ):
__lowercase = self.num_labels
__lowercase = BioGptForTokenClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowercase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self ):
__lowercase = self.prepare_config_and_inputs()
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = config_and_inputs
__lowercase = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case ( __snake_case ,__snake_case ,__snake_case ,unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
__lowerCAmelCase = (BioGptForCausalLM,) if is_torch_available() else ()
__lowerCAmelCase = (
{
"""feature-extraction""": BioGptModel,
"""text-classification""": BioGptForSequenceClassification,
"""text-generation""": BioGptForCausalLM,
"""token-classification""": BioGptForTokenClassification,
"""zero-shot""": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCAmelCase = False
def snake_case__ ( self ):
__lowercase = BioGptModelTester(self )
__lowercase = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
def snake_case__ ( self ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def snake_case__ ( self ):
__lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase = type
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def snake_case__ ( self ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowerCAmelCase_ )
def snake_case__ ( self ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowerCAmelCase_ , gradient_checkpointing=lowerCAmelCase_ )
def snake_case__ ( self ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowerCAmelCase_ )
def snake_case__ ( self ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowerCAmelCase_ )
def snake_case__ ( self ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowerCAmelCase_ )
@slow
def snake_case__ ( self ):
__lowercase = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(lowerCAmelCase_ )
__lowercase = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
__lowercase = "left"
# Define PAD Token = EOS Token = 50256
__lowercase = tokenizer.eos_token
__lowercase = model.config.eos_token_id
# use different length sentences to test batching
__lowercase = [
"Hello, my dog is a little",
"Today, I",
]
__lowercase = tokenizer(lowerCAmelCase_ , return_tensors="pt" , padding=lowerCAmelCase_ )
__lowercase = inputs["input_ids"].to(lowerCAmelCase_ )
__lowercase = model.generate(
input_ids=lowerCAmelCase_ , attention_mask=inputs["attention_mask"].to(lowerCAmelCase_ ) , )
__lowercase = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(lowerCAmelCase_ )
__lowercase = model.generate(input_ids=lowerCAmelCase_ )
__lowercase = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
__lowercase = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(lowerCAmelCase_ )
__lowercase = model.generate(input_ids=lowerCAmelCase_ , max_length=model.config.max_length - num_paddings )
__lowercase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
__lowercase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase_ )
__lowercase = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase_ )
__lowercase = [
"Hello, my dog is a little bit bigger than a little bit.",
"Today, I have a good idea of how to use the information",
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , [non_padded_sentence, padded_sentence] )
@slow
def snake_case__ ( self ):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = BioGptModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def snake_case__ ( self ):
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = 3
__lowercase = input_dict["input_ids"]
__lowercase = input_ids.ne(1 ).to(lowerCAmelCase_ )
__lowercase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__lowercase = BioGptForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowercase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__ ( self ):
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = 3
__lowercase = "multi_label_classification"
__lowercase = input_dict["input_ids"]
__lowercase = input_ids.ne(1 ).to(lowerCAmelCase_ )
__lowercase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__lowercase = BioGptForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowercase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case__ ( self ):
__lowercase = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
__lowercase = torch.tensor([[2, 4805, 9, 656, 21]] )
__lowercase = model(lowerCAmelCase_ )[0]
__lowercase = 4_2384
__lowercase = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , lowerCAmelCase_ )
__lowercase = torch.tensor(
[[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) )
@slow
def snake_case__ ( self ):
__lowercase = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
__lowercase = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(lowerCAmelCase_ )
torch.manual_seed(0 )
__lowercase = tokenizer("COVID-19 is" , return_tensors="pt" ).to(lowerCAmelCase_ )
__lowercase = model.generate(
**lowerCAmelCase_ , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=lowerCAmelCase_ , )
__lowercase = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCAmelCase_ )
__lowercase = (
"COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
" more than 800,000 deaths."
)
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
| 576
|
from __future__ import annotations
def __lowercase ( _UpperCAmelCase = 4 ) -> list[list[int]]:
'''simple docstring'''
__lowercase = abs(_UpperCAmelCase ) or 4
return [[1 + x + y * row_size for x in range(_UpperCAmelCase )] for y in range(_UpperCAmelCase )]
def __lowercase ( _UpperCAmelCase ) -> list[list[int]]:
'''simple docstring'''
return reverse_row(transpose(_UpperCAmelCase ) )
# OR.. transpose(reverse_column(matrix))
def __lowercase ( _UpperCAmelCase ) -> list[list[int]]:
'''simple docstring'''
return reverse_row(reverse_column(_UpperCAmelCase ) )
# OR.. reverse_column(reverse_row(matrix))
def __lowercase ( _UpperCAmelCase ) -> list[list[int]]:
'''simple docstring'''
return reverse_column(transpose(_UpperCAmelCase ) )
# OR.. transpose(reverse_row(matrix))
def __lowercase ( _UpperCAmelCase ) -> list[list[int]]:
'''simple docstring'''
__lowercase = [list(_UpperCAmelCase ) for x in zip(*_UpperCAmelCase )]
return matrix
def __lowercase ( _UpperCAmelCase ) -> list[list[int]]:
'''simple docstring'''
__lowercase = matrix[::-1]
return matrix
def __lowercase ( _UpperCAmelCase ) -> list[list[int]]:
'''simple docstring'''
__lowercase = [x[::-1] for x in matrix]
return matrix
def __lowercase ( _UpperCAmelCase ) -> None:
'''simple docstring'''
for i in matrix:
print(*_UpperCAmelCase )
if __name__ == "__main__":
lowerCAmelCase__ = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 90 counterclockwise:\n')
print_matrix(rotate_aa(matrix))
lowerCAmelCase__ = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 180:\n')
print_matrix(rotate_aaa(matrix))
lowerCAmelCase__ = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 270 counterclockwise:\n')
print_matrix(rotate_aaa(matrix))
| 576
| 1
|
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
snake_case__ = Lock()
def lowerCamelCase__ ( a : Union[str, Any] , a : int , a : List[Any] , a : str , a : List[Any] , a : Optional[Any] , a : Any ) -> str:
"""simple docstring"""
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(a )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
a__ :int = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
a__ :str = min(a , a )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(a )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
a__ :Optional[int] = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
a__ :int = max(a , a )
# after all swaps are performed, send the values back to main
result_pipe[1].send(a )
def lowerCamelCase__ ( a : Optional[int] ) -> Dict:
"""simple docstring"""
a__ :str = []
a__ :Optional[Any] = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
a__ :List[Any] = Pipe()
a__ :int = Pipe()
process_array_.append(
Process(
target=a , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
a__ :str = temp_rs
a__ :Dict = temp_rr
for i in range(1 , len(a ) - 1 ):
a__ :Dict = Pipe()
a__ :Any = Pipe()
process_array_.append(
Process(
target=a , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
a__ :Optional[int] = temp_rs
a__ :str = temp_rr
process_array_.append(
Process(
target=a , args=(
len(a ) - 1,
arr[len(a ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(a ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(a ) ):
a__ :Any = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def lowerCamelCase__ ( ) -> Tuple:
"""simple docstring"""
a__ :str = list(range(10 , 0 , -1 ) )
print("Initial List" )
print(*a )
a__ :Optional[int] = odd_even_transposition(a )
print("Sorted List\n" )
print(*a )
if __name__ == "__main__":
main()
| 395
|
import math
def lowerCamelCase__ ( a : list , a : int ) -> int:
"""simple docstring"""
a__ :str = len(a )
a__ :List[str] = int(math.floor(math.sqrt(a ) ) )
a__ :int = 0
while arr[min(a , a ) - 1] < x:
a__ :Union[str, Any] = step
step += int(math.floor(math.sqrt(a ) ) )
if prev >= n:
return -1
while arr[prev] < x:
a__ :str = prev + 1
if prev == min(a , a ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
snake_case__ = input('''Enter numbers separated by a comma:\n''').strip()
snake_case__ = [int(item) for item in user_input.split(''',''')]
snake_case__ = int(input('''Enter the number to be searched:\n'''))
snake_case__ = jump_search(arr, x)
if res == -1:
print('''Number not found!''')
else:
print(f'''Number {x} is at index {res}''')
| 395
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Tuple = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class __magic_name__ ( __lowerCamelCase ):
'''simple docstring'''
__lowercase : Optional[Any] = '''gptj'''
__lowercase : Any = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , __lowercase=50_400 , __lowercase=2_048 , __lowercase=4_096 , __lowercase=28 , __lowercase=16 , __lowercase=64 , __lowercase=None , __lowercase="gelu_new" , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=1E-5 , __lowercase=0.0_2 , __lowercase=True , __lowercase=50_256 , __lowercase=50_256 , __lowercase=False , **__lowercase , ):
"""simple docstring"""
__A : Dict = vocab_size
__A : Any = n_positions
__A : Tuple = n_embd
__A : List[Any] = n_layer
__A : Tuple = n_head
__A : Optional[Any] = n_inner
__A : Optional[Any] = rotary_dim
__A : Dict = activation_function
__A : List[str] = resid_pdrop
__A : List[str] = embd_pdrop
__A : Tuple = attn_pdrop
__A : Optional[int] = layer_norm_epsilon
__A : Tuple = initializer_range
__A : int = use_cache
__A : int = bos_token_id
__A : List[Any] = eos_token_id
super().__init__(
bos_token_id=a_ , eos_token_id=a_ , tie_word_embeddings=a_ , **a_ )
class __magic_name__ ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase = "default" , __lowercase = None , __lowercase = False , ):
"""simple docstring"""
super().__init__(a_ , task=a_ , patching_specs=a_ , use_past=a_ )
if not getattr(self._config , 'pad_token_id' , a_ ):
# TODO: how to do that better?
__A : int = 0
@property
def snake_case__ ( self ):
"""simple docstring"""
__A : Tuple = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(a_ , direction='inputs' )
__A : Optional[Any] = {0: "batch", 1: "past_sequence + sequence"}
else:
__A : Optional[Any] = {0: "batch", 1: "sequence"}
return common_inputs
@property
def snake_case__ ( self ):
"""simple docstring"""
return self._config.n_layer
@property
def snake_case__ ( self ):
"""simple docstring"""
return self._config.n_head
def snake_case__ ( self , __lowercase , __lowercase = -1 , __lowercase = -1 , __lowercase = False , __lowercase = None , ):
"""simple docstring"""
__A : str = super(a_ , self ).generate_dummy_inputs(
a_ , batch_size=a_ , seq_length=a_ , is_pair=a_ , framework=a_ )
# We need to order the input in the way they appears in the forward()
__A : Dict = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__A : Optional[int] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__A : Tuple = seqlen + 2
__A : Union[str, Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__A : List[str] = [
(torch.zeros(a_ ), torch.zeros(a_ )) for _ in range(self.num_layers )
]
__A : Union[str, Any] = common_inputs["attention_mask"]
if self.use_past:
__A : Optional[int] = ordered_inputs["attention_mask"].dtype
__A : Any = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(a_ , a_ , dtype=a_ )] , dim=1 )
return ordered_inputs
@property
def snake_case__ ( self ):
"""simple docstring"""
return 13
| 707
|
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
def _lowercase ( UpperCamelCase__ : int = 1000000, UpperCamelCase__ : int = 10 ):
__A : defaultdict = defaultdict(UpperCamelCase__ )
for outer_width in range(3, (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
__A : Optional[int] = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ), 1 )
else:
__A : Optional[Any] = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(UpperCamelCase__, outer_width - 1, 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 540
| 0
|
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SCREAMING_SNAKE_CASE : Any = get_tests_dir("fixtures")
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> List[str]:
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE_ = mock.Mock()
SCREAMING_SNAKE_CASE_ = 500
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = HTTPError
SCREAMING_SNAKE_CASE_ = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=__a ) as mock_head:
SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# This check we did call the fake head request
mock_head.assert_called()
def a__ ( self ) -> Tuple:
# This test is for deprecated behavior and can be removed in v5
SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def a__ ( cls ) -> Tuple:
SCREAMING_SNAKE_CASE_ = TOKEN
HfFolder.save_token(__a )
@classmethod
def a__ ( cls ) -> List[str]:
try:
delete_repo(token=cls._token, repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def a__ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained(__a )
feature_extractor.push_to_hub('test-feature-extractor', use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__a, getattr(__a, __a ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__a, repo_id='test-feature-extractor', push_to_hub=__a, use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__a, getattr(__a, __a ) )
def a__ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained(__a )
feature_extractor.push_to_hub('valid_org/test-feature-extractor', use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__a, getattr(__a, __a ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__a, repo_id='valid_org/test-feature-extractor-org', push_to_hub=__a, use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__a, getattr(__a, __a ) )
def a__ ( self ) -> Tuple:
CustomFeatureExtractor.register_for_auto_class()
SCREAMING_SNAKE_CASE_ = CustomFeatureExtractor.from_pretrained(__a )
feature_extractor.push_to_hub('test-dynamic-feature-extractor', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map, {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'}, )
SCREAMING_SNAKE_CASE_ = AutoFeatureExtractor.from_pretrained(
f"""{USER}/test-dynamic-feature-extractor""", trust_remote_code=__a )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__, 'CustomFeatureExtractor' )
| 294
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def snake_case ( ) -> Generator[int, None, None]:
lowerCamelCase : dict[int, int] = {}
lowerCamelCase : str = 2
while True:
lowerCamelCase : int = factor_map.pop(UpperCamelCase__ , UpperCamelCase__ )
if factor:
lowerCamelCase : List[Any] = factor + prime
while x in factor_map:
x += factor
lowerCamelCase : int = factor
else:
lowerCamelCase : Optional[int] = prime
yield prime
prime += 1
def snake_case ( UpperCamelCase__ : float = 1E10 ) -> int:
lowerCamelCase : Optional[int] = sieve()
lowerCamelCase : List[str] = 1
while True:
lowerCamelCase : Tuple = next(UpperCamelCase__ )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(UpperCamelCase__ )
n += 2
if __name__ == "__main__":
print(solution())
| 222
| 0
|
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> float:
'''simple docstring'''
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
UpperCAmelCase_ = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
UpperCAmelCase_ = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 561
|
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : List[Any] =SpeechTaTokenizer
UpperCamelCase__ : Optional[Any] =False
UpperCamelCase__ : List[Any] =True
def __a ( self :Union[str, Any]) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = SpeechTaTokenizer(_lowercase)
UpperCAmelCase_ = AddedToken('''<mask>''' , lstrip=_lowercase , rstrip=_lowercase)
UpperCAmelCase_ = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token})
tokenizer.add_tokens(['''<ctc_blank>'''])
tokenizer.save_pretrained(self.tmpdirname)
def __a ( self :Tuple , _lowercase :Optional[int]) -> Optional[int]:
UpperCAmelCase_ = '''this is a test'''
UpperCAmelCase_ = '''this is a test'''
return input_text, output_text
def __a ( self :List[Any] , _lowercase :str , _lowercase :List[str]=False , _lowercase :Union[str, Any]=20 , _lowercase :Any=5) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ = self.get_input_output_texts(_lowercase)
UpperCAmelCase_ = tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
UpperCAmelCase_ = tokenizer.decode(_lowercase , clean_up_tokenization_spaces=_lowercase)
return text, ids
def __a ( self :Dict) -> str:
UpperCAmelCase_ = '''<pad>'''
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase) , _lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase) , _lowercase)
def __a ( self :int) -> Dict:
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<s>''')
self.assertEqual(vocab_keys[1] , '''<pad>''')
self.assertEqual(vocab_keys[-4] , '''œ''')
self.assertEqual(vocab_keys[-2] , '''<mask>''')
self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''')
self.assertEqual(len(_lowercase) , 81)
def __a ( self :Dict) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 79)
def __a ( self :Dict) -> Union[str, Any]:
UpperCAmelCase_ = self.get_tokenizers(do_lower_case=_lowercase)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
UpperCAmelCase_ = tokenizer.vocab_size
UpperCAmelCase_ = len(_lowercase)
self.assertNotEqual(_lowercase , 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
UpperCAmelCase_ = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
UpperCAmelCase_ = tokenizer.add_tokens(_lowercase)
UpperCAmelCase_ = tokenizer.vocab_size
UpperCAmelCase_ = len(_lowercase)
self.assertNotEqual(_lowercase , 0)
self.assertEqual(_lowercase , _lowercase)
self.assertEqual(_lowercase , len(_lowercase))
self.assertEqual(_lowercase , all_size + len(_lowercase))
UpperCAmelCase_ = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=_lowercase)
self.assertGreaterEqual(len(_lowercase) , 4)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
UpperCAmelCase_ = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
UpperCAmelCase_ = tokenizer.add_special_tokens(_lowercase)
UpperCAmelCase_ = tokenizer.vocab_size
UpperCAmelCase_ = len(_lowercase)
self.assertNotEqual(_lowercase , 0)
self.assertEqual(_lowercase , _lowercase)
self.assertEqual(_lowercase , len(_lowercase))
self.assertEqual(_lowercase , all_size_a + len(_lowercase))
UpperCAmelCase_ = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=_lowercase)
self.assertGreaterEqual(len(_lowercase) , 6)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[0] , tokens[1])
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokens[-4])
self.assertEqual(tokens[0] , tokenizer.eos_token_id)
self.assertEqual(tokens[-3] , tokenizer.pad_token_id)
def __a ( self :Any) -> List[str]:
pass
def __a ( self :Any) -> Tuple:
pass
def __a ( self :Dict) -> Dict:
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
# fmt: off
self.assertListEqual(_lowercase , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''])
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
UpperCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
_lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''])
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_lowercase)
# fmt: off
self.assertListEqual(_lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
# fmt: on
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_lowercase)
self.assertListEqual(
_lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''])
@slow
def __a ( self :Any) -> List[Any]:
# Use custom sequence because this tokenizer does not handle numbers.
UpperCAmelCase_ = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
UpperCAmelCase_ = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=_lowercase , )
| 561
| 1
|
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list:
snake_case__ = []
snake_case__ , snake_case__ = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
snake_case__ = result + left + right
return input_list
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> list:
if len(__lowerCAmelCase ) <= 1:
return input_list
snake_case__ = list(__lowerCAmelCase )
# iteration for two-way merging
snake_case__ = 2
while p <= len(__lowerCAmelCase ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase ):
snake_case__ = i
snake_case__ = i + p - 1
snake_case__ = (low + high + 1) // 2
snake_case__ = merge(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# final merge of last two parts
if p * 2 >= len(__lowerCAmelCase ):
snake_case__ = i
snake_case__ = merge(__lowerCAmelCase , 0 , __lowerCAmelCase , len(__lowerCAmelCase ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
lowerCamelCase__ : Optional[Any] = input("""Enter numbers separated by a comma:\n""").strip()
if user_input == "":
lowerCamelCase__ : str = []
else:
lowerCamelCase__ : List[Any] = [int(item.strip()) for item in user_input.split(""",""")]
print(iter_merge_sort(unsorted))
| 33
|
'''simple docstring'''
def UpperCamelCase ( lowercase_ : int ) -> int:
'''simple docstring'''
if n == 1 or not isinstance(lowercase_ , lowercase_ ):
return 0
elif n == 2:
return 1
else:
lowercase =[0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def UpperCamelCase ( lowercase_ : int ) -> int:
'''simple docstring'''
lowercase =0
lowercase =2
while digits < n:
index += 1
lowercase =len(str(fibonacci(lowercase_ ) ) )
return index
def UpperCamelCase ( lowercase_ : int = 1_0_0_0 ) -> int:
'''simple docstring'''
return fibonacci_digits_index(lowercase_ )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 72
| 0
|
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def _snake_case ( _snake_case : Dict ): # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def _snake_case ( ):
with parallel_backend('''spark''' ):
assert ParallelBackendConfig.backend_name == "spark"
lowerCAmelCase : str = [1, 2, 3]
with pytest.raises(_snake_case ):
with parallel_backend('''unsupported backend''' ):
map_nested(_snake_case , _snake_case , num_proc=2 )
with pytest.raises(_snake_case ):
with parallel_backend('''unsupported backend''' ):
map_nested(_snake_case , _snake_case , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' , [2, -1] )
def _snake_case ( _snake_case : str ):
lowerCAmelCase : Any = [1, 2]
lowerCAmelCase : List[str] = {'''a''': 1, '''b''': 2}
lowerCAmelCase : int = {'''a''': [1, 2], '''b''': [3, 4]}
lowerCAmelCase : str = {'''a''': {'''1''': 1}, '''b''': 2}
lowerCAmelCase : List[str] = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
lowerCAmelCase : Optional[Any] = [2, 3]
lowerCAmelCase : Optional[Any] = {'''a''': 2, '''b''': 3}
lowerCAmelCase : Optional[int] = {'''a''': [2, 3], '''b''': [4, 5]}
lowerCAmelCase : str = {'''a''': {'''1''': 2}, '''b''': 3}
lowerCAmelCase : Any = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
with parallel_backend('''spark''' ):
assert map_nested(_snake_case , _snake_case , num_proc=_snake_case ) == expected_map_nested_sa
assert map_nested(_snake_case , _snake_case , num_proc=_snake_case ) == expected_map_nested_sa
assert map_nested(_snake_case , _snake_case , num_proc=_snake_case ) == expected_map_nested_sa
assert map_nested(_snake_case , _snake_case , num_proc=_snake_case ) == expected_map_nested_sa
assert map_nested(_snake_case , _snake_case , num_proc=_snake_case ) == expected_map_nested_sa
| 710
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class snake_case_( a__ ):
__UpperCamelCase = '''philschmid/bart-large-cnn-samsum'''
__UpperCamelCase = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
__UpperCamelCase = '''summarizer'''
__UpperCamelCase = AutoTokenizer
__UpperCamelCase = AutoModelForSeqaSeqLM
__UpperCamelCase = ['''text''']
__UpperCamelCase = ['''text''']
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : int ):
return self.pre_processor(UpperCamelCase_ , return_tensors='''pt''' , truncation=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str ):
return self.model.generate(**UpperCamelCase_ )[0]
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Tuple ):
return self.pre_processor.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
| 637
| 0
|
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
lowercase_ : int = datasets.utils.logging.get_logger(__name__)
lowercase_ : Tuple = ['names', 'prefix']
lowercase_ : str = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
lowercase_ : Optional[int] = ['encoding_errors', 'on_bad_lines']
lowercase_ : List[Any] = ['date_format']
@dataclass
class _lowerCamelCase ( datasets.BuilderConfig ):
__a = ","
__a = None
__a = "infer"
__a = None
__a = None
__a = None
__a = None
__a = None
__a = True
__a = None
__a = None
__a = None
__a = None
__a = False
__a = None
__a = None
__a = None
__a = True
__a = True
__a = False
__a = True
__a = None
__a = "."
__a = None
__a = '"'
__a = 0
__a = None
__a = None
__a = None
__a = None
__a = True
__a = True
__a = 0
__a = True
__a = False
__a = None
__a = 10000
__a = None
__a = "strict"
__a = "error"
__a = None
def UpperCamelCase_ ( self ) -> Dict:
if self.delimiter is not None:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.delimiter
if self.column_names is not None:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.column_names
@property
def UpperCamelCase_ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: str= {
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class _lowerCamelCase ( datasets.ArrowBasedBuilder ):
__a = CsvConfig
def UpperCamelCase_ ( self ) -> Any:
return datasets.DatasetInfo(features=self.config.features )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> str:
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
SCREAMING_SNAKE_CASE__: Optional[int]= dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase , (str, list, tuple) ):
SCREAMING_SNAKE_CASE__: Any= data_files
if isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE__: Tuple= [files]
SCREAMING_SNAKE_CASE__: str= [dl_manager.iter_files(lowerCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
SCREAMING_SNAKE_CASE__: List[Any]= []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE__: Union[str, Any]= [files]
SCREAMING_SNAKE_CASE__: Optional[int]= [dl_manager.iter_files(lowerCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase , gen_kwargs={'''files''': files} ) )
return splits
def UpperCamelCase_ ( self , lowerCAmelCase ) -> pa.Table:
if self.config.features is not None:
SCREAMING_SNAKE_CASE__: int= self.config.features.arrow_schema
if all(not require_storage_cast(lowerCAmelCase ) for feature in self.config.features.values() ):
# cheaper cast
SCREAMING_SNAKE_CASE__: Optional[int]= pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
SCREAMING_SNAKE_CASE__: Optional[int]= table_cast(lowerCAmelCase , lowerCAmelCase )
return pa_table
def UpperCamelCase_ ( self , lowerCAmelCase ) -> List[Any]:
SCREAMING_SNAKE_CASE__: str= self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
SCREAMING_SNAKE_CASE__: str= (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase ) ):
SCREAMING_SNAKE_CASE__: List[Any]= pd.read_csv(lowerCAmelCase , iterator=lowerCAmelCase , dtype=lowerCAmelCase , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowerCAmelCase ):
SCREAMING_SNAKE_CASE__: int= pa.Table.from_pandas(lowerCAmelCase )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase )
except ValueError as e:
logger.error(f'Failed to read file \'{file}\' with error {type(lowerCAmelCase )}: {e}' )
raise
| 64
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 64
| 1
|
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def __UpperCamelCase ( _A : str , _A : str , _A : List[Any] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase : Any = AlbertConfig.from_json_file(_A )
print(F"Building PyTorch model from configuration: {config}" )
lowerCAmelCase : Optional[int] = AlbertForPreTraining(_A )
# Load weights from tf checkpoint
load_tf_weights_in_albert(_A , _A , _A )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , _A )
if __name__ == "__main__":
_lowerCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--albert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained ALBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_lowerCAmelCase : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 646
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class lowerCAmelCase ( a ):
_lowerCamelCase : List[str] = """xlm-roberta"""
def __init__( self , snake_case__=3_0522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.0_2 , snake_case__=1e-1_2 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__="absolute" , snake_case__=True , snake_case__=None , **snake_case__ , ):
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
lowerCAmelCase : Optional[Any] = vocab_size
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : Optional[Any] = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : Optional[int] = hidden_act
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : Dict = hidden_dropout_prob
lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase : Optional[Any] = max_position_embeddings
lowerCAmelCase : Optional[int] = type_vocab_size
lowerCAmelCase : int = initializer_range
lowerCAmelCase : List[Any] = layer_norm_eps
lowerCAmelCase : Union[str, Any] = position_embedding_type
lowerCAmelCase : Union[str, Any] = use_cache
lowerCAmelCase : List[str] = classifier_dropout
class lowerCAmelCase ( a ):
@property
def lowercase ( self ):
if self.task == "multiple-choice":
lowerCAmelCase : str = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase : Optional[int] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 646
| 1
|
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
}
__UpperCAmelCase = {
'''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''},
'''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''},
}
__UpperCAmelCase = {
'''ctrl''': 256,
}
__UpperCAmelCase = {
'''Pregnancy''': 168_629,
'''Christianity''': 7_675,
'''Explain''': 106_423,
'''Fitness''': 63_440,
'''Saving''': 63_163,
'''Ask''': 27_171,
'''Ass''': 95_985,
'''Joke''': 163_509,
'''Questions''': 45_622,
'''Thoughts''': 49_605,
'''Retail''': 52_342,
'''Feminism''': 164_338,
'''Writing''': 11_992,
'''Atheism''': 192_263,
'''Netflix''': 48_616,
'''Computing''': 39_639,
'''Opinion''': 43_213,
'''Alone''': 44_967,
'''Funny''': 58_917,
'''Gaming''': 40_358,
'''Human''': 4_088,
'''India''': 1_331,
'''Joker''': 77_138,
'''Diet''': 36_206,
'''Legal''': 11_859,
'''Norman''': 4_939,
'''Tip''': 72_689,
'''Weight''': 52_343,
'''Movies''': 46_273,
'''Running''': 23_425,
'''Science''': 2_090,
'''Horror''': 37_793,
'''Confession''': 60_572,
'''Finance''': 12_250,
'''Politics''': 16_360,
'''Scary''': 191_985,
'''Support''': 12_654,
'''Technologies''': 32_516,
'''Teenage''': 66_160,
'''Event''': 32_769,
'''Learned''': 67_460,
'''Notion''': 182_770,
'''Wikipedia''': 37_583,
'''Books''': 6_665,
'''Extract''': 76_050,
'''Confessions''': 102_701,
'''Conspiracy''': 75_932,
'''Links''': 63_674,
'''Narcissus''': 150_425,
'''Relationship''': 54_766,
'''Relationships''': 134_796,
'''Reviews''': 41_671,
'''News''': 4_256,
'''Translation''': 26_820,
'''multilingual''': 128_406,
}
def _snake_case ( A ) -> Optional[int]:
lowerCAmelCase__ = set()
lowerCAmelCase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase__ = char
lowerCAmelCase__ = set(A )
return pairs
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : List[Any] = VOCAB_FILES_NAMES
lowercase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Dict = CONTROL_CODES
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_="<unk>" , **lowerCamelCase_ ) -> List[Any]:
super().__init__(unk_token=lowerCamelCase_ , **lowerCamelCase_ )
with open(lowerCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
lowerCAmelCase__ = json.load(lowerCamelCase_ )
lowerCAmelCase__ = {v: k for k, v in self.encoder.items()}
with open(lowerCamelCase_ , encoding='''utf-8''' ) as merges_handle:
lowerCAmelCase__ = merges_handle.read().split('''\n''' )[1:-1]
lowerCAmelCase__ = [tuple(merge.split() ) for merge in merges]
lowerCAmelCase__ = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
lowerCAmelCase__ = {}
@property
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
return len(self.encoder )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return dict(self.encoder , **self.added_tokens_encoder )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> List[Any]:
if token in self.cache:
return self.cache[token]
lowerCAmelCase__ = tuple(lowerCamelCase_ )
lowerCAmelCase__ = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
lowerCAmelCase__ = get_pairs(lowerCamelCase_ )
if not pairs:
return token
while True:
lowerCAmelCase__ = min(lowerCamelCase_ , key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase__ , lowerCAmelCase__ = bigram
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
while i < len(lowerCamelCase_ ):
try:
lowerCAmelCase__ = word.index(lowerCamelCase_ , lowerCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase__ = j
if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase__ = tuple(lowerCamelCase_ )
lowerCAmelCase__ = new_word
if len(lowerCamelCase_ ) == 1:
break
else:
lowerCAmelCase__ = get_pairs(lowerCamelCase_ )
lowerCAmelCase__ = '''@@ '''.join(lowerCamelCase_ )
lowerCAmelCase__ = word[:-4]
lowerCAmelCase__ = word
return word
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
lowerCAmelCase__ = []
lowerCAmelCase__ = re.findall(r'''\S+\n?''' , lowerCamelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCamelCase_ ).split(''' ''' ) ) )
return split_tokens
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Union[str, Any]:
return self.decoder.get(lowerCamelCase_ , self.unk_token )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Tuple:
lowerCAmelCase__ = ''' '''.join(lowerCamelCase_ ).replace('''@@ ''' , '''''' ).strip()
return out_string
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase__ = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + '''\n''' )
lowerCAmelCase__ = 0
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
lowerCAmelCase__ = token_index
writer.write(''' '''.join(lowerCamelCase_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 90
|
'''simple docstring'''
import math
def _SCREAMING_SNAKE_CASE ( __snake_case : int ):
_A = []
_A = 2
_A = int(math.sqrt(__snake_case ) ) # Size of every segment
_A = [True] * (end + 1)
_A = []
while start <= end:
if temp[start] is True:
in_prime.append(__snake_case )
for i in range(start * start , end + 1 , __snake_case ):
_A = False
start += 1
prime += in_prime
_A = end + 1
_A = min(2 * end , __snake_case )
while low <= n:
_A = [True] * (high - low + 1)
for each in in_prime:
_A = math.floor(low / each ) * each
if t < low:
t += each
for j in range(__snake_case , high + 1 , __snake_case ):
_A = False
for j in range(len(__snake_case ) ):
if temp[j] is True:
prime.append(j + low )
_A = high + 1
_A = min(high + end , __snake_case )
return prime
print(sieve(10**6))
| 107
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: str
__lowerCamelCase: int
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise TypeError("The parameter s type must be str." )
return [s[i:] + s[:i] for i in range(len(_UpperCamelCase ) )]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise TypeError("The parameter s type must be str." )
if not s:
raise ValueError("The parameter s must not be empty." )
lowercase_ : Any = all_rotations(_UpperCamelCase )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
lowercase_ : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(_UpperCamelCase ),
}
return response
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise TypeError("The parameter bwt_string type must be str." )
if not bwt_string:
raise ValueError("The parameter bwt_string must not be empty." )
try:
lowercase_ : str = int(_UpperCamelCase )
except ValueError:
raise TypeError(
"The parameter idx_original_string type must be int or passive"
" of cast to int." )
if idx_original_string < 0:
raise ValueError("The parameter idx_original_string must not be lower than 0." )
if idx_original_string >= len(_UpperCamelCase ):
raise ValueError(
"The parameter idx_original_string must be lower than" " len(bwt_string)." )
lowercase_ : Union[str, Any] = [""] * len(_UpperCamelCase )
for _ in range(len(_UpperCamelCase ) ):
for i in range(len(_UpperCamelCase ) ):
lowercase_ : Tuple = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
UpperCamelCase__ = 'Provide a string that I will generate its BWT transform: '
UpperCamelCase__ = input(entry_msg).strip()
UpperCamelCase__ = bwt_transform(s)
print(
f"""Burrows Wheeler transform for string '{s}' results """
f"""in '{result["bwt_string"]}'"""
)
UpperCamelCase__ = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
f"""Reversing Burrows Wheeler transform for entry '{result["bwt_string"]}' """
f"""we get original string '{original_string}'"""
)
| 640
|
'''simple docstring'''
from __future__ import annotations
UpperCamelCase__ = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
UpperCamelCase__ = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : str = []
lowercase_ : List[str] = len(_UpperCamelCase )
for i in range(_UpperCamelCase ):
lowercase_ : float = -1
for j in range(i + 1 , _UpperCamelCase ):
if arr[i] < arr[j]:
lowercase_ : Union[str, Any] = arr[j]
break
result.append(_UpperCamelCase )
return result
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = []
for i, outer in enumerate(_UpperCamelCase ):
lowercase_ : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
lowercase_ : Optional[Any] = inner
break
result.append(_UpperCamelCase )
return result
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = len(_UpperCamelCase )
lowercase_ : list[float] = []
lowercase_ : list[float] = [-1] * arr_size
for index in reversed(range(_UpperCamelCase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
lowercase_ : Optional[Any] = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
UpperCamelCase__ = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 640
| 1
|
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _snake_case ( A__ ):
def __init__( self , a , a=13 , a=7 , a=True , a=True , a=True , a=True , a=True , a=False , a=False , a=False , a=2 , a=99 , a=0 , a=32 , a=5 , a=4 , a=0.1 , a=0.1 , a=512 , a=12 , a=2 , a=0.02 , a=3 , a=4 , a="last" , a=None , a=None , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_lengths
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = gelu_activation
SCREAMING_SNAKE_CASE = sinusoidal_embeddings
SCREAMING_SNAKE_CASE = causal
SCREAMING_SNAKE_CASE = asm
SCREAMING_SNAKE_CASE = n_langs
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = n_special
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = summary_type
SCREAMING_SNAKE_CASE = use_proj
SCREAMING_SNAKE_CASE = scope
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length])
SCREAMING_SNAKE_CASE = None
if self.use_input_lengths:
SCREAMING_SNAKE_CASE = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , 2).float()
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices)
SCREAMING_SNAKE_CASE = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> Dict:
SCREAMING_SNAKE_CASE = FlaubertModel(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , lengths=a , langs=a)
SCREAMING_SNAKE_CASE = model(a , langs=a)
SCREAMING_SNAKE_CASE = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> str:
SCREAMING_SNAKE_CASE = FlaubertWithLMHeadModel(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> Any:
SCREAMING_SNAKE_CASE = FlaubertForQuestionAnsweringSimple(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a)
SCREAMING_SNAKE_CASE = model(a , start_positions=a , end_positions=a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> Dict:
SCREAMING_SNAKE_CASE = FlaubertForQuestionAnswering(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a)
SCREAMING_SNAKE_CASE = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , p_mask=a , )
SCREAMING_SNAKE_CASE = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , )
((SCREAMING_SNAKE_CASE) , ) = result_with_labels.to_tuple()
SCREAMING_SNAKE_CASE = model(a , start_positions=a , end_positions=a)
((SCREAMING_SNAKE_CASE) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> int:
SCREAMING_SNAKE_CASE = FlaubertForSequenceClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a)
SCREAMING_SNAKE_CASE = model(a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> Any:
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = FlaubertForTokenClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> Tuple:
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = FlaubertForMultipleChoice(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'lengths': input_lengths,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class _snake_case ( A__ , A__ , unittest.TestCase ):
_lowercase : Any = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowercase : Any = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a) -> Optional[int]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE__ ( self , a , a , a=False) -> Any:
SCREAMING_SNAKE_CASE = super()._prepare_for_class(a , a , return_labels=a)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
SCREAMING_SNAKE_CASE = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
SCREAMING_SNAKE_CASE = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = FlaubertModelTester(self)
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=a , emb_dim=37)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*a)
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = FlaubertModel.from_pretrained(a)
self.assertIsNotNone(a)
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = model_class(config=a)
SCREAMING_SNAKE_CASE = self._prepare_for_class(a , a)
SCREAMING_SNAKE_CASE = torch.jit.trace(
a , (inputs_dict['input_ids'].to('cpu'), inputs_dict['attention_mask'].to('cpu')))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , 'traced_model.pt'))
SCREAMING_SNAKE_CASE = torch.jit.load(os.path.join(a , 'traced_model.pt') , map_location=a)
loaded(inputs_dict['input_ids'].to(a) , inputs_dict['attention_mask'].to(a))
@require_torch
class _snake_case ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased')
SCREAMING_SNAKE_CASE = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(a)[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 11, 768))
self.assertEqual(output.shape , a)
SCREAMING_SNAKE_CASE = torch.tensor(
[[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1E-4))
| 73
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : Optional[Any] = {
"""configuration_lxmert""": ["""LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LxmertConfig"""],
"""tokenization_lxmert""": ["""LxmertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = ["""LxmertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Tuple = [
"""LxmertEncoder""",
"""LxmertForPreTraining""",
"""LxmertForQuestionAnswering""",
"""LxmertModel""",
"""LxmertPreTrainedModel""",
"""LxmertVisualFeatureEncoder""",
"""LxmertXLayer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = [
"""TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLxmertForPreTraining""",
"""TFLxmertMainLayer""",
"""TFLxmertModel""",
"""TFLxmertPreTrainedModel""",
"""TFLxmertVisualFeatureEncoder""",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
a : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 218
| 0
|
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class __lowercase ( unittest.TestCase ):
def _a ( self) -> List[Any]:
__snake_case = tf.convert_to_tensor(
[
[
8.222_0991, # 3rd highest value; idx. 0
-0.562_0044,
5.2322_9752,
4.038_6393,
-6.879_8378,
-0.5478_5802,
-3.201_2153,
2.9277_7176,
1.8817_1953,
7.3534_1276, # 5th highest value; idx. 9
8.4320_7833, # 2nd highest value; idx. 10
-9.8571_1836,
-5.9620_9236,
-1.1303_9161,
-7.111_5294,
-0.836_9633,
-5.318_6408,
7.0642_7407,
0.8136_9344,
-0.8202_3817,
-5.917_9796,
0.5881_3443,
-6.9977_8438,
4.7155_1189,
-0.1877_1637,
7.4402_0759, # 4th highest value; idx. 25
9.3845_0987, # 1st highest value; idx. 26
2.1266_2941,
-9.3256_2038,
2.3565_2522,
], # cummulative prob of 5 highest values <= 0.6
[
0.5842_5518,
4.5313_9238,
-5.5751_0464,
-6.2803_0699,
-7.1952_9503,
-4.0212_2551,
1.3933_7037,
-6.0670_7057,
1.5948_0517,
-9.64_3119,
0.0390_7799,
0.6723_1762,
-8.8820_6726,
6.2711_5922, # 4th highest value; idx. 13
2.2852_0723,
4.8276_7506,
4.3042_1368,
8.827_5313, # 2nd highest value; idx. 17
5.4402_9958, # 5th highest value; idx. 18
-4.473_5794,
7.3857_9536, # 3rd highest value; idx. 20
-2.9105_1663,
2.6194_6077,
-2.567_4762,
-9.4895_9302,
-4.0292_2645,
-1.3541_6918,
9.6770_2323, # 1st highest value; idx. 27
-5.8947_8553,
1.8537_0467,
], # cummulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
__snake_case = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
__snake_case = tf.convert_to_tensor(
[8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.floataa , ) # expected non filtered values as noted above
__snake_case = tf_top_k_top_p_filtering(lowercase_ , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4)
__snake_case = output[output != -float('inf')]
__snake_case = tf.cast(
tf.where(tf.not_equal(lowercase_ , tf.constant(-float('inf') , dtype=tf.floataa))) , dtype=tf.intaa , )
tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-12)
tf.debugging.assert_equal(lowercase_ , lowercase_)
@require_tf
class __lowercase ( unittest.TestCase , lowerCamelCase__ ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
__UpperCAmelCase = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def _a ( self) -> Optional[Any]:
# TF-only test: tf.saved_model export
__snake_case = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2')
__snake_case = 2
__snake_case = 2
class __lowercase ( tf.Module ):
def __init__( self , lowercase_) -> List[str]:
super(lowercase_ , self).__init__()
__snake_case = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='input_ids'),
tf.TensorSpec((None, input_length) , tf.intaa , name='attention_mask'),
) , jit_compile=lowercase_ , )
def _a ( self , lowercase_ , lowercase_) -> Optional[Any]:
__snake_case = self.model.generate(
input_ids=lowercase_ , attention_mask=lowercase_ , max_new_tokens=lowercase_ , return_dict_in_generate=lowercase_ , )
return {"sequences": outputs["sequences"]}
__snake_case = [[2, 0], [1_0_2, 1_0_3]]
__snake_case = [[1, 0], [1, 1]]
__snake_case = DummyModel(model=lowercase_)
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(lowercase_ , lowercase_ , signatures={'serving_default': dummy_model.serving})
__snake_case = tf.saved_model.load(lowercase_).signatures['serving_default']
for batch_size in range(1 , len(lowercase_) + 1):
__snake_case = {
'input_ids': tf.constant(dummy_input_ids[:batch_size]),
'attention_mask': tf.constant(dummy_attention_masks[:batch_size]),
}
__snake_case = serving_func(**lowercase_)['sequences']
__snake_case = test_model.generate(**lowercase_ , max_new_tokens=lowercase_)
tf.debugging.assert_equal(lowercase_ , lowercase_)
@slow
def _a ( self) -> Any:
# TF-only test: tf.saved_model export
__snake_case = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2')
__snake_case = 1
__snake_case = 2
class __lowercase ( tf.Module ):
def __init__( self , lowercase_) -> Union[str, Any]:
super(lowercase_ , self).__init__()
__snake_case = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='input_ids'),
tf.TensorSpec((batch_size, None) , tf.intaa , name='attention_mask'),
) , jit_compile=lowercase_ , )
def _a ( self , lowercase_ , lowercase_) -> Dict:
__snake_case = self.model.generate(
input_ids=lowercase_ , attention_mask=lowercase_ , max_new_tokens=lowercase_ , return_dict_in_generate=lowercase_ , )
return {"sequences": outputs["sequences"]}
__snake_case = [[2], [1_0_2, 1_0_3]]
__snake_case = [[1], [1, 1]]
__snake_case = DummyModel(model=lowercase_)
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(lowercase_ , lowercase_ , signatures={'serving_default': dummy_model.serving})
__snake_case = tf.saved_model.load(lowercase_).signatures['serving_default']
for input_row in range(len(lowercase_)):
__snake_case = {
'input_ids': tf.constant([dummy_input_ids[input_row]]),
'attention_mask': tf.constant([dummy_attention_masks[input_row]]),
}
__snake_case = serving_func(**lowercase_)['sequences']
__snake_case = test_model.generate(**lowercase_ , max_new_tokens=lowercase_)
tf.debugging.assert_equal(lowercase_ , lowercase_)
@slow
@require_tensorflow_text
def _a ( self) -> Optional[int]:
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=lowercase_)
class __lowercase ( tf.keras.layers.Layer ):
def __init__( self) -> Dict:
super().__init__()
__snake_case = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(lowercase_ , 'spiece.model') , 'rb').read())
__snake_case = TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5')
def _a ( self , lowercase_ , *lowercase_ , **lowercase_) -> Optional[Any]:
__snake_case = self.tokenizer.tokenize(lowercase_)
__snake_case , __snake_case = text.pad_model_inputs(
lowercase_ , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id)
__snake_case = self.model.generate(input_ids=lowercase_ , attention_mask=lowercase_)
return self.tokenizer.detokenize(lowercase_)
__snake_case = CompleteSentenceTransformer()
__snake_case = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs')
__snake_case = complete_model(lowercase_)
__snake_case = tf.keras.Model(lowercase_ , lowercase_)
keras_model.save(lowercase_)
def _a ( self) -> Any:
# Has PT equivalent: this test relies on random sampling
__snake_case = {
'do_sample': True,
'num_beams': 1,
'top_p': 0.7,
'top_k': 1_0,
'temperature': 0.7,
}
__snake_case = 1_4
__snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
__snake_case = 'Hello, my dog is cute and'
__snake_case = tokenizer(lowercase_ , return_tensors='tf')
__snake_case = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2')
__snake_case = 6_3_8
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(':/CPU:0'):
tf.random.set_seed(0)
__snake_case = model.generate(**lowercase_ , eos_token_id=lowercase_ , **lowercase_)
self.assertTrue(expectation == len(generated_tokens[0]))
__snake_case = [6_3_8, 1_9_8]
with tf.device(':/CPU:0'):
tf.random.set_seed(0)
__snake_case = model.generate(**lowercase_ , eos_token_id=lowercase_ , **lowercase_)
self.assertTrue(expectation == len(generated_tokens[0]))
def _a ( self) -> Union[str, Any]:
# Has PT equivalent: ample use of framework-specific code
__snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart')
__snake_case = 'Hugging Face is a technology company based in New York and Paris.'
__snake_case = bart_tokenizer(lowercase_ , return_tensors='tf').input_ids
__snake_case = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart')
__snake_case = bart_model.generate(lowercase_).numpy()
class __lowercase ( lowerCamelCase__ ):
def _a ( self , lowercase_ , lowercase_=None , **lowercase_) -> Dict:
return super().call(lowercase_ , **lowercase_)
__snake_case = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart')
__snake_case = bart_model.generate(lowercase_ , foo='bar').numpy()
self.assertTrue(np.array_equal(lowercase_ , lowercase_))
class __lowercase ( bart_model.model.encoder.__class__ ):
def _a ( self , lowercase_ , **lowercase_) -> Optional[Any]:
return super().call(lowercase_ , **lowercase_)
__snake_case = FakeEncoder(bart_model.config , bart_model.model.shared)
__snake_case = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
__snake_case = bart_model.generate(lowercase_).numpy()
with self.assertRaises(lowercase_):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(lowercase_ , foo='bar')
| 676
|
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def A ( snake_case__ : Dataset , snake_case__ : Dict[str, str] ) -> Optional[Any]:
'''simple docstring'''
__snake_case = args.log_outputs
__snake_case = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
# load metric
__snake_case = load_metric('wer' )
__snake_case = load_metric('cer' )
# compute metrics
__snake_case = wer.compute(references=result['target'] , predictions=result['prediction'] )
__snake_case = cer.compute(references=result['target'] , predictions=result['prediction'] )
# print & log results
__snake_case = f"WER: {wer_result}\nCER: {cer_result}"
print(snake_case__ )
with open(f"{dataset_id}_eval_results.txt" , 'w' ) as f:
f.write(snake_case__ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
__snake_case = f"log_{dataset_id}_predictions.txt"
__snake_case = f"log_{dataset_id}_targets.txt"
with open(snake_case__ , 'w' ) as p, open(snake_case__ , 'w' ) as t:
# mapping function to write output
def write_to_file(snake_case__ : Union[str, Any] , snake_case__ : Tuple ):
p.write(f"{i}" + '\n' )
p.write(batch['prediction'] + '\n' )
t.write(f"{i}" + '\n' )
t.write(batch['target'] + '\n' )
result.map(snake_case__ , with_indices=snake_case__ )
def A ( snake_case__ : str ) -> str:
'''simple docstring'''
__snake_case = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
__snake_case = re.sub(snake_case__ , '' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
__snake_case = ['\n\n', '\n', ' ', ' ']
for t in token_sequences_to_ignore:
__snake_case = ' '.join(text.split(snake_case__ ) )
return text
def A ( snake_case__ : int ) -> Optional[int]:
'''simple docstring'''
# load dataset
__snake_case = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case__ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
__snake_case = AutoFeatureExtractor.from_pretrained(args.model_id )
__snake_case = feature_extractor.sampling_rate
# resample audio
__snake_case = dataset.cast_column('audio' , Audio(sampling_rate=snake_case__ ) )
# load eval pipeline
if args.device is None:
__snake_case = 0 if torch.cuda.is_available() else -1
__snake_case = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(snake_case__ : Optional[Any] ):
__snake_case = asr(
batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
__snake_case = prediction['text']
__snake_case = normalize_text(batch['sentence'] )
return batch
# run inference on all examples
__snake_case = dataset.map(snake_case__ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(snake_case__ , snake_case__ )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
UpperCAmelCase__ : str = parser.parse_args()
main(args)
| 676
| 1
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class __SCREAMING_SNAKE_CASE ( unittest.TestCase , lowercase):
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = load_tool("text-classification" )
self.tool.setup()
_UpperCAmelCase = load_tool("text-classification" , remote=__UpperCamelCase )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(__UpperCamelCase , "positive" )
| 684
|
from __future__ import annotations
from collections import namedtuple
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> tuple:
_UpperCAmelCase = namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
# fmt: off
_SCREAMING_SNAKE_CASE =['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
_SCREAMING_SNAKE_CASE ={'''unk_token''': '''<unk>'''}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_a ) )
_SCREAMING_SNAKE_CASE ={
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __UpperCamelCase ( self : Union[str, Any] , **_a : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : Tuple , **_a : List[Any] ) -> List[Any]:
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : str , **_a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =CLIPProcessor(tokenizer=_a , image_processor=_a )
processor_slow.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
_SCREAMING_SNAKE_CASE =CLIPProcessor(tokenizer=_a , image_processor=_a )
processor_fast.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _a )
self.assertIsInstance(processor_fast.tokenizer , _a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _a )
self.assertIsInstance(processor_fast.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_SCREAMING_SNAKE_CASE =CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =CLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =CLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''lower newer'''
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =CLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''lower newer'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =CLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.batch_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =CLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''lower newer'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 191
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
snake_case_ : Optional[Any] = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[Any] = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[Any] = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
snake_case_ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 191
| 1
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class A ( UpperCAmelCase_ ):
def __init__(self : Tuple , *__UpperCAmelCase : int , __UpperCAmelCase : Any=None , __UpperCAmelCase : Tuple=None , **__UpperCAmelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
UpperCAmelCase__ = eval_examples
UpperCAmelCase__ = post_process_function
def lowercase_ (self : Optional[int] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : str = "eval" ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = self.eval_dataset if eval_dataset is None else eval_dataset
UpperCAmelCase__ = self.get_eval_dataloader(__UpperCAmelCase )
UpperCAmelCase__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase__ = self.compute_metrics
UpperCAmelCase__ = None
UpperCAmelCase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
UpperCAmelCase__ = time.time()
try:
UpperCAmelCase__ = eval_loop(
__UpperCAmelCase , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__UpperCAmelCase , metric_key_prefix=__UpperCAmelCase , )
finally:
UpperCAmelCase__ = compute_metrics
UpperCAmelCase__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
__UpperCAmelCase , __UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
UpperCAmelCase__ = self.post_process_function(__UpperCAmelCase , __UpperCAmelCase , output.predictions )
UpperCAmelCase__ = self.compute_metrics(__UpperCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
UpperCAmelCase__ = metrics.pop(__UpperCAmelCase )
metrics.update(output.metrics )
else:
UpperCAmelCase__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(__UpperCAmelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
UpperCAmelCase__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , __UpperCAmelCase )
return metrics
def lowercase_ (self : int , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : str=None , __UpperCAmelCase : str = "test" ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.get_test_dataloader(__UpperCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase__ = self.compute_metrics
UpperCAmelCase__ = None
UpperCAmelCase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
UpperCAmelCase__ = time.time()
try:
UpperCAmelCase__ = eval_loop(
__UpperCAmelCase , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__UpperCAmelCase , metric_key_prefix=__UpperCAmelCase , )
finally:
UpperCAmelCase__ = compute_metrics
UpperCAmelCase__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
__UpperCAmelCase , __UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
UpperCAmelCase__ = self.post_process_function(__UpperCAmelCase , __UpperCAmelCase , output.predictions , "predict" )
UpperCAmelCase__ = self.compute_metrics(__UpperCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
UpperCAmelCase__ = metrics.pop(__UpperCAmelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__UpperCAmelCase )
| 486
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 486
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase__ : Optional[int] = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Union[str, Any] = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Union[str, Any] = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
lowercase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 43
|
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
lowercase__ : List[Any] = "src/transformers"
# Matches is_xxx_available()
lowercase__ : Optional[Any] = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
lowercase__ : Any = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowercase__ : Union[str, Any] = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
lowercase__ : Optional[int] = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
lowercase__ : List[str] = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowercase__ : Any = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
lowercase__ : List[Any] = re.compile(R"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowercase__ : Optional[Any] = re.compile(R"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
lowercase__ : Union[str, Any] = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
lowercase__ : int = re.compile(R"^\s*try:")
# Catches a line with else:
lowercase__ : Any = re.compile(R"^\s*else:")
def __lowerCamelCase ( _UpperCamelCase : Optional[Any] ):
'''simple docstring'''
if _re_test_backend.search(_UpperCamelCase ) is None:
return None
UpperCAmelCase_ = [b[0] for b in _re_backend.findall(_UpperCamelCase )]
backends.sort()
return "_and_".join(_UpperCamelCase )
def __lowerCamelCase ( _UpperCamelCase : int ):
'''simple docstring'''
with open(_UpperCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase_ = f.readlines()
UpperCAmelCase_ = 0
while line_index < len(_UpperCamelCase ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_UpperCamelCase ):
return None
# First grab the objects without a specific backend in _import_structure
UpperCAmelCase_ = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
UpperCAmelCase_ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_UpperCamelCase ):
UpperCAmelCase_ = _re_one_line_import_struct.search(_UpperCamelCase ).groups()[0]
UpperCAmelCase_ = re.findall(R'''\[([^\]]+)\]''' , _UpperCamelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
UpperCAmelCase_ = _re_import_struct_key_value.search(_UpperCamelCase )
if single_line_import_search is not None:
UpperCAmelCase_ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_UpperCamelCase ) > 0]
objects.extend(_UpperCamelCase )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
UpperCAmelCase_ = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
UpperCAmelCase_ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCAmelCase_ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCAmelCase_ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
UpperCAmelCase_ = lines[line_index]
if _re_import_struct_add_one.search(_UpperCamelCase ) is not None:
objects.append(_re_import_struct_add_one.search(_UpperCamelCase ).groups()[0] )
elif _re_import_struct_add_many.search(_UpperCamelCase ) is not None:
UpperCAmelCase_ = _re_import_struct_add_many.search(_UpperCamelCase ).groups()[0].split(''', ''' )
UpperCAmelCase_ = [obj[1:-1] for obj in imports if len(_UpperCamelCase ) > 0]
objects.extend(_UpperCamelCase )
elif _re_between_brackets.search(_UpperCamelCase ) is not None:
UpperCAmelCase_ = _re_between_brackets.search(_UpperCamelCase ).groups()[0].split(''', ''' )
UpperCAmelCase_ = [obj[1:-1] for obj in imports if len(_UpperCamelCase ) > 0]
objects.extend(_UpperCamelCase )
elif _re_quote_object.search(_UpperCamelCase ) is not None:
objects.append(_re_quote_object.search(_UpperCamelCase ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
UpperCAmelCase_ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
UpperCAmelCase_ = []
while (
line_index < len(_UpperCamelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
UpperCAmelCase_ = lines[line_index]
UpperCAmelCase_ = _re_import.search(_UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
UpperCAmelCase_ = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(_UpperCamelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
UpperCAmelCase_ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCAmelCase_ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCAmelCase_ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
UpperCAmelCase_ = lines[line_index]
UpperCAmelCase_ = _re_import.search(_UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
UpperCAmelCase_ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : Optional[Any] ):
'''simple docstring'''
def find_duplicates(_UpperCamelCase : Tuple ):
return [k for k, v in collections.Counter(_UpperCamelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
UpperCAmelCase_ = []
for key in import_dict_objects.keys():
UpperCAmelCase_ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
UpperCAmelCase_ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
UpperCAmelCase_ = '''base imports''' if key == '''none''' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def __lowerCamelCase ( ):
'''simple docstring'''
UpperCAmelCase_ = []
for root, _, files in os.walk(_UpperCamelCase ):
if "__init__.py" in files:
UpperCAmelCase_ = os.path.join(_UpperCamelCase , '''__init__.py''' )
UpperCAmelCase_ = parse_init(_UpperCamelCase )
if objects is not None:
UpperCAmelCase_ = analyze_results(*_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
UpperCAmelCase_ = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('''\n'''.join(_UpperCamelCase ) )
if len(_UpperCamelCase ) > 0:
raise ValueError('''\n\n'''.join(_UpperCamelCase ) )
def __lowerCamelCase ( ):
'''simple docstring'''
UpperCAmelCase_ = []
for path, directories, files in os.walk(_UpperCamelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(_UpperCamelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_UpperCamelCase ) / folder).glob('''*.py''' ) ) ) == 0:
continue
UpperCAmelCase_ = str((Path(_UpperCamelCase ) / folder).relative_to(_UpperCamelCase ) )
UpperCAmelCase_ = short_path.replace(os.path.sep , '''.''' )
submodules.append(_UpperCamelCase )
for fname in files:
if fname == "__init__.py":
continue
UpperCAmelCase_ = str((Path(_UpperCamelCase ) / fname).relative_to(_UpperCamelCase ) )
UpperCAmelCase_ = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(_UpperCamelCase )
return submodules
lowercase__ : Union[str, Any] = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
"models.esm.openfold_utils",
]
def __lowerCamelCase ( ):
'''simple docstring'''
from transformers.utils import direct_transformers_import
UpperCAmelCase_ = direct_transformers_import(_UpperCamelCase )
UpperCAmelCase_ = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
# (potentiall re-) add them.
with open(os.path.join(_UpperCamelCase , '''__init__.py''' ) , '''r''' ) as f:
UpperCAmelCase_ = f.read()
import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , _UpperCamelCase ) ) )
UpperCAmelCase_ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(_UpperCamelCase ) > 0:
UpperCAmelCase_ = '''\n'''.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registed in the main init of Transformers:\n'''
F"""{list_of_modules}\n"""
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 43
| 1
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def UpperCamelCase ( lowercase_ : List[str] , lowercase_ : Union[str, Any]=1_0 ) -> List[Any]:
'''simple docstring'''
lowercase =[]
for _ in range(lowercase_ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def UpperCamelCase ( lowercase_ : List[Any] , lowercase_ : str=1_0 ) -> Dict:
'''simple docstring'''
lowercase =[]
for step in range(lowercase_ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase =os.path.join(lowercase_ , '''schedule.bin''' )
torch.save(scheduler.state_dict() , lowercase_ )
lowercase =torch.load(lowercase_ )
scheduler.load_state_dict(lowercase_ )
return lrs
@require_torch
class __magic_name__ ( unittest.TestCase ):
def _A( self , snake_case_ , snake_case_ , snake_case_ ):
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for a, b in zip(snake_case_ , snake_case_ ):
self.assertAlmostEqual(snake_case_ , snake_case_ , delta=snake_case_ )
def _A( self ):
lowercase =torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case_ )
lowercase =torch.tensor([0.4, 0.2, -0.5] )
lowercase =nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
lowercase =AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(1_00 ):
lowercase =criterion(snake_case_ , snake_case_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def _A( self ):
lowercase =torch.tensor([0.1, -0.2, -0.1] , requires_grad=snake_case_ )
lowercase =torch.tensor([0.4, 0.2, -0.5] )
lowercase =nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
lowercase =Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=snake_case_ , weight_decay=0.0 , relative_step=snake_case_ , scale_parameter=snake_case_ , warmup_init=snake_case_ , )
for _ in range(10_00 ):
lowercase =criterion(snake_case_ , snake_case_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class __magic_name__ ( unittest.TestCase ):
UpperCamelCase__ = nn.Linear(50 , 50 ) if is_torch_available() else None
UpperCamelCase__ = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
UpperCamelCase__ = 10
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ):
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for a, b in zip(snake_case_ , snake_case_ ):
self.assertAlmostEqual(snake_case_ , snake_case_ , delta=snake_case_ , msg=snake_case_ )
def _A( self ):
lowercase ={'''num_warmup_steps''': 2, '''num_training_steps''': 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
lowercase ={
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
for scheduler_func, data in scheds.items():
lowercase , lowercase =data
lowercase =scheduler_func(self.optimizer , **snake_case_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
lowercase =unwrap_schedule(snake_case_ , self.num_steps )
self.assertListAlmostEqual(
snake_case_ , snake_case_ , tol=1E-2 , msg=f'failed for {scheduler_func} in normal scheduler' , )
lowercase =scheduler_func(self.optimizer , **snake_case_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(snake_case_ ) # wrap to test picklability of the schedule
lowercase =unwrap_and_save_reload_schedule(snake_case_ , self.num_steps )
self.assertListEqual(snake_case_ , snake_case_ , msg=f'failed for {scheduler_func} in save and reload' )
class __magic_name__ :
def __init__( self , snake_case_ ):
lowercase =fn
def __call__( self , *snake_case_ , **snake_case_ ):
return self.fn(*snake_case_ , **snake_case_ )
@classmethod
def _A( self , snake_case_ ):
lowercase =list(map(self , scheduler.lr_lambdas ) )
| 72
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ : Tuple = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[int] = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : List[str] = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : str = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 33
| 0
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
"""Salesforce/codegen-350M-nl""": """https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json""",
"""Salesforce/codegen-350M-multi""": """https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json""",
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json""",
"""Salesforce/codegen-2B-nl""": """https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json""",
"""Salesforce/codegen-2B-multi""": """https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json""",
"""Salesforce/codegen-2B-mono""": """https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json""",
"""Salesforce/codegen-6B-nl""": """https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json""",
"""Salesforce/codegen-6B-multi""": """https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json""",
"""Salesforce/codegen-6B-mono""": """https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json""",
"""Salesforce/codegen-16B-nl""": """https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json""",
"""Salesforce/codegen-16B-multi""": """https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json""",
"""Salesforce/codegen-16B-mono""": """https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json""",
}
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : List[Any] = 'codegen'
__lowerCAmelCase : Dict = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , SCREAMING_SNAKE_CASE_=5_04_00 , SCREAMING_SNAKE_CASE_=20_48 , SCREAMING_SNAKE_CASE_=20_48 , SCREAMING_SNAKE_CASE_=40_96 , SCREAMING_SNAKE_CASE_=28 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="gelu_new" , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=5_02_56 , SCREAMING_SNAKE_CASE_=5_02_56 , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
lowercase__ : Dict = vocab_size
lowercase__ : List[str] = n_ctx
lowercase__ : List[str] = n_positions
lowercase__ : List[Any] = n_embd
lowercase__ : Dict = n_layer
lowercase__ : List[str] = n_head
lowercase__ : List[str] = n_inner
lowercase__ : int = rotary_dim
lowercase__ : Dict = activation_function
lowercase__ : Any = resid_pdrop
lowercase__ : Tuple = embd_pdrop
lowercase__ : int = attn_pdrop
lowercase__ : Dict = layer_norm_epsilon
lowercase__ : str = initializer_range
lowercase__ : List[Any] = use_cache
lowercase__ : Dict = bos_token_id
lowercase__ : Optional[Any] = eos_token_id
super().__init__(
bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , tie_word_embeddings=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
class _snake_case ( UpperCAmelCase_ ):
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = "default" , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , ):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE_ , task=SCREAMING_SNAKE_CASE_ , patching_specs=SCREAMING_SNAKE_CASE_ , use_past=SCREAMING_SNAKE_CASE_)
if not getattr(self._config , """pad_token_id""" , SCREAMING_SNAKE_CASE_):
# TODO: how to do that better?
lowercase__ : Optional[Any] = 0
@property
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}})
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_ , direction="""inputs""")
lowercase__ : Union[str, Any] = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowercase__ : str = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowercase__ ( self):
'''simple docstring'''
return self._config.n_layer
@property
def lowercase__ ( self):
'''simple docstring'''
return self._config.n_head
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , ):
'''simple docstring'''
lowercase__ : Any = super(SCREAMING_SNAKE_CASE_ , self).generate_dummy_inputs(
SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , seq_length=SCREAMING_SNAKE_CASE_ , is_pair=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_)
# We need to order the input in the way they appears in the forward()
lowercase__ : Dict = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""")
else:
import torch
lowercase__ : Tuple = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowercase__ : int = seqlen + 2
lowercase__ : Optional[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowercase__ : Optional[int] = [
(torch.zeros(SCREAMING_SNAKE_CASE_), torch.zeros(SCREAMING_SNAKE_CASE_)) for _ in range(self.num_layers)
]
lowercase__ : Optional[int] = common_inputs["""attention_mask"""]
if self.use_past:
lowercase__ : str = ordered_inputs["""attention_mask"""].dtype
lowercase__ : int = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_)] , dim=1)
return ordered_inputs
@property
def lowercase__ ( self):
'''simple docstring'''
return 13
| 700
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _snake_case :
def __init__( self , SCREAMING_SNAKE_CASE_ = None):
'''simple docstring'''
if components is None:
lowercase__ : List[str] = []
lowercase__ : Dict = list(SCREAMING_SNAKE_CASE_)
def __len__( self):
'''simple docstring'''
return len(self.__components)
def __str__( self):
'''simple docstring'''
return "(" + ",".join(map(SCREAMING_SNAKE_CASE_ , self.__components)) + ")"
def __add__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Optional[Any] = len(self)
if size == len(SCREAMING_SNAKE_CASE_):
lowercase__ : List[str] = [self.__components[i] + other.component(SCREAMING_SNAKE_CASE_) for i in range(SCREAMING_SNAKE_CASE_)]
return Vector(SCREAMING_SNAKE_CASE_)
else:
raise Exception("""must have the same size""")
def __sub__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : List[Any] = len(self)
if size == len(SCREAMING_SNAKE_CASE_):
lowercase__ : Optional[Any] = [self.__components[i] - other.component(SCREAMING_SNAKE_CASE_) for i in range(SCREAMING_SNAKE_CASE_)]
return Vector(SCREAMING_SNAKE_CASE_)
else: # error case
raise Exception("""must have the same size""")
@overload
def __mul__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
...
@overload
def __mul__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
...
def __mul__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , (float, int)):
lowercase__ : Optional[int] = [c * other for c in self.__components]
return Vector(SCREAMING_SNAKE_CASE_)
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and len(self) == len(SCREAMING_SNAKE_CASE_):
lowercase__ : Dict = len(self)
lowercase__ : Optional[Any] = [self.__components[i] * other.component(SCREAMING_SNAKE_CASE_) for i in range(SCREAMING_SNAKE_CASE_)]
return sum(SCREAMING_SNAKE_CASE_)
else: # error case
raise Exception("""invalid operand!""")
def lowercase__ ( self):
'''simple docstring'''
return Vector(self.__components)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and -len(self.__components) <= i < len(self.__components):
return self.__components[i]
else:
raise Exception("""index out of range""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
assert -len(self.__components) <= pos < len(self.__components)
lowercase__ : List[Any] = value
def lowercase__ ( self):
'''simple docstring'''
if len(self.__components) == 0:
raise Exception("""Vector is empty""")
lowercase__ : Union[str, Any] = [c**2 for c in self.__components]
return math.sqrt(sum(SCREAMING_SNAKE_CASE_))
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False):
'''simple docstring'''
lowercase__ : Union[str, Any] = self * other
lowercase__ : Optional[Any] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den))
else:
return math.acos(num / den)
def UpperCamelCase ( lowercase_ ) -> Vector:
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_ )
return Vector([0] * dimension )
def UpperCamelCase ( lowercase_ , lowercase_ ) -> Vector:
'''simple docstring'''
assert isinstance(lowercase_ , lowercase_ ) and (isinstance(lowercase_ , lowercase_ ))
lowercase__ : Union[str, Any] = [0] * dimension
lowercase__ : Any = 1
return Vector(lowercase_ )
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Vector:
'''simple docstring'''
assert (
isinstance(lowercase_ , lowercase_ )
and isinstance(lowercase_ , lowercase_ )
and (isinstance(lowercase_ , (int, float) ))
)
return x * scalar + y
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Vector:
'''simple docstring'''
random.seed(lowercase_ )
lowercase__ : int = [random.randint(lowercase_ , lowercase_ ) for _ in range(lowercase_ )]
return Vector(lowercase_ )
class _snake_case :
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : List[Any] = matrix
lowercase__ : Any = w
lowercase__ : Any = h
def __str__( self):
'''simple docstring'''
lowercase__ : str = """"""
for i in range(self.__height):
ans += "|"
for j in range(self.__width):
if j < self.__width - 1:
ans += str(self.__matrix[i][j]) + ","
else:
ans += str(self.__matrix[i][j]) + "|\n"
return ans
def __add__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
lowercase__ : Tuple = []
for i in range(self.__height):
lowercase__ : Tuple = [
self.__matrix[i][j] + other.component(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
for j in range(self.__width)
]
matrix.append(SCREAMING_SNAKE_CASE_)
return Matrix(SCREAMING_SNAKE_CASE_ , self.__width , self.__height)
else:
raise Exception("""matrix must have the same dimension!""")
def __sub__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
lowercase__ : Optional[int] = []
for i in range(self.__height):
lowercase__ : List[str] = [
self.__matrix[i][j] - other.component(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
for j in range(self.__width)
]
matrix.append(SCREAMING_SNAKE_CASE_)
return Matrix(SCREAMING_SNAKE_CASE_ , self.__width , self.__height)
else:
raise Exception("""matrices must have the same dimension!""")
@overload
def __mul__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
...
@overload
def __mul__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
...
def __mul__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): # matrix-vector
if len(SCREAMING_SNAKE_CASE_) == self.__width:
lowercase__ : List[Any] = zero_vector(self.__height)
for i in range(self.__height):
lowercase__ : Union[str, Any] = [
self.__matrix[i][j] * other.component(SCREAMING_SNAKE_CASE_)
for j in range(self.__width)
]
ans.change_component(SCREAMING_SNAKE_CASE_ , sum(SCREAMING_SNAKE_CASE_))
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""")
elif isinstance(SCREAMING_SNAKE_CASE_ , (int, float)): # matrix-scalar
lowercase__ : Tuple = [
[self.__matrix[i][j] * other for j in range(self.__width)]
for i in range(self.__height)
]
return Matrix(SCREAMING_SNAKE_CASE_ , self.__width , self.__height)
return None
def lowercase__ ( self):
'''simple docstring'''
return self.__height
def lowercase__ ( self):
'''simple docstring'''
return self.__width
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
lowercase__ : Tuple = value
else:
raise Exception("""change_component: indices out of bounds""")
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""")
lowercase__ : List[Any] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(SCREAMING_SNAKE_CASE_)):
lowercase__ : List[str] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(SCREAMING_SNAKE_CASE_ , self.__width - 1 , self.__height - 1).determinant()
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""")
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
else:
raise Exception("""Indices out of bounds""")
def lowercase__ ( self):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""")
if self.__height < 1:
raise Exception("""Matrix has no element""")
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
lowercase__ : Optional[int] = [
self.__matrix[0][y] * self.cofactor(0 , SCREAMING_SNAKE_CASE_) for y in range(self.__width)
]
return sum(SCREAMING_SNAKE_CASE_)
def UpperCamelCase ( lowercase_ ) -> Matrix:
'''simple docstring'''
lowercase__ : list[list[float]] = [[0] * n for _ in range(lowercase_ )]
return Matrix(lowercase_ , lowercase_ , lowercase_ )
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Matrix:
'''simple docstring'''
random.seed(lowercase_ )
lowercase__ : list[list[float]] = [
[random.randint(lowercase_ , lowercase_ ) for _ in range(lowercase_ )] for _ in range(lowercase_ )
]
return Matrix(lowercase_ , lowercase_ , lowercase_ )
| 495
| 0
|
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowercase : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowercase : list[int] = [ord(letter) for letter in string.ascii_lowercase]
lowercase : set[int] = {ord(char) for char in VALID_CHARS}
lowercase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def lowerCAmelCase__ ( _a : Optional[Any] , _a : Any ):
snake_case_ : str = ""
snake_case_ : int
snake_case_ : int
snake_case_ : int
for keychar, cipherchar in zip(cycle(_a ) , _a ):
snake_case_ : Dict = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(_a )
return decoded
def lowerCAmelCase__ ( _a : Dict ):
snake_case_ : list[str] = []
for key in product(_a , repeat=3 ):
snake_case_ : int = try_key(_a , _a )
if encoded is not None:
possibles.append(_a )
return possibles
def lowerCAmelCase__ ( _a : int , _a : int ):
return [possible for possible in possibles if common_word in possible.lower()]
def lowerCAmelCase__ ( _a : Tuple = "p059_cipher.txt" ):
snake_case_ : list[int]
snake_case_ : list[str]
snake_case_ : str
snake_case_ : str
snake_case_ : str = Path(_a ).parent.joinpath(_a ).read_text(encoding="utf-8" )
snake_case_ : str = [int(_a ) for number in data.strip().split("," )]
snake_case_ : List[str] = filter_valid_chars(_a )
for common_word in COMMON_WORDS:
snake_case_ : Any = filter_common_word(_a , _a )
if len(_a ) == 1:
break
snake_case_ : List[str] = possibles[0]
return sum(ord(_a ) for char in decoded_text )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 568
|
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
def A_ ( self , snake_case ):
'''simple docstring'''
with open(snake_case , encoding="utf-8" ) as input_file:
UpperCAmelCase : Dict = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
UpperCAmelCase : Tuple = input_file.read()
UpperCAmelCase : List[Any] = regexp.search(snake_case )
return match
def A_ ( self , snake_case ):
'''simple docstring'''
with open(snake_case , encoding="utf-8" ) as input_file:
UpperCAmelCase : List[str] = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
UpperCAmelCase : List[Any] = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
UpperCAmelCase : str = regexp.finditer(snake_case )
UpperCAmelCase : Union[str, Any] = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = Path("./datasets" )
UpperCAmelCase : Optional[int] = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(snake_case ) ):
raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}" )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = Path("./datasets" )
UpperCAmelCase : Any = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(snake_case ) ):
raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 679
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE_ ( snake_case , snake_case , snake_case )-> Optional[int]:
# Construct model
if gpta_config_file == "":
_lowerCamelCase = GPTaConfig()
else:
_lowerCamelCase = GPTaConfig.from_json_file(snake_case )
_lowerCamelCase = GPTaModel(snake_case )
# Load weights from numpy
load_tf_weights_in_gpta(snake_case , snake_case , snake_case )
# Save pytorch-model
_lowerCamelCase = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
_lowerCamelCase = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(f'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(model.state_dict() , snake_case )
print(f'Save configuration file to {pytorch_config_dump_path}' )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A_ : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
A_ : Union[str, Any] =parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 717
|
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
A_ : Tuple =imread(R"""digital_image_processing/image_data/lena_small.jpg""")
A_ : Union[str, Any] =cvtColor(img, COLOR_BGR2GRAY)
def SCREAMING_SNAKE_CASE_ ( )-> Union[str, Any]:
_lowerCamelCase = cn.convert_to_negative(snake_case )
# assert negative_img array for at least one True
assert negative_img.any()
def SCREAMING_SNAKE_CASE_ ( )-> Any:
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(snake_case , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def SCREAMING_SNAKE_CASE_ ( )-> int:
_lowerCamelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def SCREAMING_SNAKE_CASE_ ( )-> Optional[int]:
_lowerCamelCase = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
_lowerCamelCase = canny.canny(snake_case )
# assert canny array for at least one True
assert canny_array.any()
def SCREAMING_SNAKE_CASE_ ( )-> Optional[int]:
assert gg.gaussian_filter(snake_case , 5 , sigma=0.9 ).all()
def SCREAMING_SNAKE_CASE_ ( )-> str:
# laplace diagonals
_lowerCamelCase = array([[0.2_5, 0.5, 0.2_5], [0.5, -3, 0.5], [0.2_5, 0.5, 0.2_5]] )
_lowerCamelCase = conv.img_convolve(snake_case , snake_case ).astype(snake_case )
assert res.any()
def SCREAMING_SNAKE_CASE_ ( )-> Tuple:
assert med.median_filter(snake_case , 3 ).any()
def SCREAMING_SNAKE_CASE_ ( )-> Dict:
_lowerCamelCase , _lowerCamelCase = sob.sobel_filter(snake_case )
assert grad.any() and theta.any()
def SCREAMING_SNAKE_CASE_ ( )-> Any:
_lowerCamelCase = sp.make_sepia(snake_case , 20 )
assert sepia.all()
def SCREAMING_SNAKE_CASE_ ( snake_case : str = "digital_image_processing/image_data/lena_small.jpg" )-> List[Any]:
_lowerCamelCase = bs.Burkes(imread(snake_case , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def SCREAMING_SNAKE_CASE_ ( snake_case : str = "digital_image_processing/image_data/lena_small.jpg" , )-> List[str]:
_lowerCamelCase = rs.NearestNeighbour(imread(snake_case , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def SCREAMING_SNAKE_CASE_ ( )-> List[Any]:
_lowerCamelCase = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
_lowerCamelCase = imread(snake_case , 0 )
# Test for get_neighbors_pixel function() return not None
_lowerCamelCase = 0
_lowerCamelCase = 0
_lowerCamelCase = image[x_coordinate][y_coordinate]
_lowerCamelCase = lbp.get_neighbors_pixel(
snake_case , snake_case , snake_case , snake_case )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
_lowerCamelCase = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
_lowerCamelCase = lbp.local_binary_value(snake_case , snake_case , snake_case )
assert lbp_image.any()
| 222
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class _lowerCamelCase ( _a ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] ="mra"
def __init__( self , UpperCAmelCase=50265 , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=3072 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=1 , UpperCAmelCase=0.02 , UpperCAmelCase=1E-5 , UpperCAmelCase="absolute" , UpperCAmelCase=4 , UpperCAmelCase="full" , UpperCAmelCase=0 , UpperCAmelCase=0 , UpperCAmelCase=1 , UpperCAmelCase=0 , UpperCAmelCase=2 , **UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
__snake_case : Tuple = vocab_size
__snake_case : Any = max_position_embeddings
__snake_case : Optional[int] = hidden_size
__snake_case : Optional[int] = num_hidden_layers
__snake_case : str = num_attention_heads
__snake_case : str = intermediate_size
__snake_case : Union[str, Any] = hidden_act
__snake_case : List[Any] = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Dict = initializer_range
__snake_case : Any = type_vocab_size
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : int = position_embedding_type
__snake_case : Tuple = block_per_row
__snake_case : Union[str, Any] = approx_mode
__snake_case : Dict = initial_prior_first_n_blocks
__snake_case : List[Any] = initial_prior_diagonal_n_blocks
| 243
|
'''simple docstring'''
import qiskit
def snake_case_ (UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
_a = qiskit.Aer.get_backend('''aer_simulator''' )
_a = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
_a = qiskit.execute(UpperCamelCase , UpperCamelCase , shots=1000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(UpperCamelCase )
if __name__ == "__main__":
_snake_case : Tuple = half_adder(1, 1)
print(F'''Half Adder Output Qubit Counts: {counts}''')
| 22
| 0
|
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = """T5Config"""
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : jnp.array , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> jnp.ndarray:
__lowercase = jnp.zeros_like(SCREAMING_SNAKE_CASE )
__lowercase = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
__lowercase = shifted_input_ids.at[:, 0].set(SCREAMING_SNAKE_CASE )
__lowercase = jnp.where(shifted_input_ids == -100 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return shifted_input_ids
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Union[str, Any] = "mt5"
lowerCAmelCase__ : List[str] = MTaConfig
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[Any] = "mt5"
lowerCAmelCase__ : Optional[Any] = MTaConfig
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Dict = "mt5"
lowerCAmelCase__ : List[Any] = MTaConfig
| 700
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[Any] = "layoutlmv3"
def __init__( self : Optional[Any] , _UpperCAmelCase : int=5_02_65 , _UpperCAmelCase : Union[str, Any]=7_68 , _UpperCAmelCase : str=12 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : List[str]=30_72 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : Optional[Any]=0 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : Optional[Any]=1_28 , _UpperCAmelCase : Tuple=1_28 , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : Dict=1_28 , _UpperCAmelCase : int=64 , _UpperCAmelCase : List[str]=2_56 , _UpperCAmelCase : int=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=2_24 , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
vocab_size=_UpperCAmelCase , hidden_size=_UpperCAmelCase , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , intermediate_size=_UpperCAmelCase , hidden_act=_UpperCAmelCase , hidden_dropout_prob=_UpperCAmelCase , attention_probs_dropout_prob=_UpperCAmelCase , max_position_embeddings=_UpperCAmelCase , type_vocab_size=_UpperCAmelCase , initializer_range=_UpperCAmelCase , layer_norm_eps=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = max_ad_position_embeddings
__lowercase = coordinate_size
__lowercase = shape_size
__lowercase = has_relative_attention_bias
__lowercase = rel_pos_bins
__lowercase = max_rel_pos
__lowercase = has_spatial_attention_bias
__lowercase = rel_ad_pos_bins
__lowercase = max_rel_ad_pos
__lowercase = text_embed
__lowercase = visual_embed
__lowercase = input_size
__lowercase = num_channels
__lowercase = patch_size
__lowercase = classifier_dropout
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[str] = version.parse("1.12" )
@property
def a__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def a__ ( self : Any ) -> float:
"""simple docstring"""
return 1e-5
@property
def a__ ( self : Dict ) -> int:
"""simple docstring"""
return 12
def a__ ( self : Tuple , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional["TensorType"] = None , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , 'apply_ocr' , _UpperCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase )
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__lowercase = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = dict(
processor(
_UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) )
return inputs
| 688
| 0
|
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[str] = TypeVar('''DatasetType''', Dataset, IterableDataset)
def UpperCAmelCase_ ( _A , _A = None , _A = None , _A = None , _A = None , _A = "first_exhausted" , ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''' )
for i, dataset in enumerate(_A ):
if not isinstance(_A , (Dataset, IterableDataset) ):
if isinstance(_A , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
'''is an empty dataset dictionary.''' )
raise ValueError(
F'''Dataset at position {i} has at least one split: {list(_A )}\n'''
F'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_A ) )}\']''' )
raise ValueError(
F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_A ).__name__}.''' )
if i == 0:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = (
(Dataset, IterableDataset) if isinstance(_A , _A ) else (IterableDataset, Dataset)
)
elif not isinstance(_A , _A ):
raise ValueError(
F'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F'''{stopping_strategy} is not supported. Please enter a valid stopping_strategy.''' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_A , _A , _A , info=_A , split=_A , stopping_strategy=_A )
else:
return _interleave_iterable_datasets(
_A , _A , _A , info=_A , split=_A , stopping_strategy=_A )
def UpperCAmelCase_ ( _A , _A = None , _A = None , _A = 0 , ):
'''simple docstring'''
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''' )
for i, dataset in enumerate(_A ):
if not isinstance(_A , (Dataset, IterableDataset) ):
if isinstance(_A , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
'''is an empty dataset dictionary.''' )
raise ValueError(
F'''Dataset at position {i} has at least one split: {list(_A )}\n'''
F'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_A ) )}\']''' )
raise ValueError(
F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_A ).__name__}.''' )
if i == 0:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = (
(Dataset, IterableDataset) if isinstance(_A , _A ) else (IterableDataset, Dataset)
)
elif not isinstance(_A , _A ):
raise ValueError(
F'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_A , info=_A , split=_A , axis=_A )
else:
return _concatenate_iterable_datasets(_A , info=_A , split=_A , axis=_A )
| 493
|
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCAmelCase__ :
"""simple docstring"""
@staticmethod
def lowercase_ ( *__lowerCamelCase : int , **__lowerCamelCase : List[Any] ) -> List[str]:
pass
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
_SCREAMING_SNAKE_CASE : Optional[Any] = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
a = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def lowercase_ ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = pipeline(
'''document-question-answering''' , model=__lowerCamelCase , tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = INVOICE_URL
SCREAMING_SNAKE_CASE__ = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , '''''' ) ) )
SCREAMING_SNAKE_CASE__ = '''What is the placebo?'''
SCREAMING_SNAKE_CASE__ = [
{
'''image''': load_image(__lowerCamelCase ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def lowercase_ ( self : int , __lowerCamelCase : Dict , __lowerCamelCase : List[str] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = dqa_pipeline(__lowerCamelCase , top_k=2 )
self.assertEqual(
__lowerCamelCase , [
[
{'''score''': ANY(__lowerCamelCase ), '''answer''': ANY(__lowerCamelCase ), '''start''': ANY(__lowerCamelCase ), '''end''': ANY(__lowerCamelCase )},
{'''score''': ANY(__lowerCamelCase ), '''answer''': ANY(__lowerCamelCase ), '''start''': ANY(__lowerCamelCase ), '''end''': ANY(__lowerCamelCase )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def lowercase_ ( self : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
SCREAMING_SNAKE_CASE__ = INVOICE_URL
SCREAMING_SNAKE_CASE__ = '''How many cats are there?'''
SCREAMING_SNAKE_CASE__ = [
{'''score''': 0.0001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
SCREAMING_SNAKE_CASE__ = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , __lowerCamelCase )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
SCREAMING_SNAKE_CASE__ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
SCREAMING_SNAKE_CASE__ = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(__lowerCamelCase , [] )
# We can optionnally pass directly the words and bounding boxes
SCREAMING_SNAKE_CASE__ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , words=__lowerCamelCase , boxes=__lowerCamelCase , top_k=2 )
self.assertEqual(__lowerCamelCase , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowercase_ ( self : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
SCREAMING_SNAKE_CASE__ = INVOICE_URL
SCREAMING_SNAKE_CASE__ = '''What is the invoice number?'''
SCREAMING_SNAKE_CASE__ = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
SCREAMING_SNAKE_CASE__ = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
SCREAMING_SNAKE_CASE__ = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowercase_ ( self : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
SCREAMING_SNAKE_CASE__ = INVOICE_URL
SCREAMING_SNAKE_CASE__ = '''What is the invoice number?'''
SCREAMING_SNAKE_CASE__ = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
SCREAMING_SNAKE_CASE__ = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
SCREAMING_SNAKE_CASE__ = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowercase_ ( self : Dict ) -> int:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__lowerCamelCase , revision='''3dc6de3''' , )
SCREAMING_SNAKE_CASE__ = INVOICE_URL
SCREAMING_SNAKE_CASE__ = '''What is the invoice number?'''
SCREAMING_SNAKE_CASE__ = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
SCREAMING_SNAKE_CASE__ = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
SCREAMING_SNAKE_CASE__ = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
SCREAMING_SNAKE_CASE__ = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , '''''' ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE__ = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowercase_ ( self : Optional[int] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__lowerCamelCase , revision='''3dc6de3''' , max_seq_len=50 , )
SCREAMING_SNAKE_CASE__ = INVOICE_URL
SCREAMING_SNAKE_CASE__ = '''What is the invoice number?'''
SCREAMING_SNAKE_CASE__ = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
SCREAMING_SNAKE_CASE__ = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
SCREAMING_SNAKE_CASE__ = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , '''''' ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE__ = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def lowercase_ ( self : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
SCREAMING_SNAKE_CASE__ = INVOICE_URL
SCREAMING_SNAKE_CASE__ = '''What is the invoice number?'''
SCREAMING_SNAKE_CASE__ = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def lowercase_ ( self : Union[str, Any] ) -> Optional[Any]:
pass
| 493
| 1
|
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 1_00 * 2**20, 9_00 * 2**20] )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : List[Any] ) -> int:
"""simple docstring"""
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
SCREAMING_SNAKE_CASE_ : str = dataset_size < in_memory_max_size
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = is_small_dataset(lowerCamelCase_ )
assert result == expected
| 700
|
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
UpperCamelCase__ : Dict = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
UpperCamelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'https://pypi.org/pypi/diffusers/json'
SCREAMING_SNAKE_CASE_ : Optional[int] = json.loads(request.urlopen(lowerCamelCase_ ).read() )['releases'].keys()
return sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : version.Version(lowerCamelCase_ ) )
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(lowerCamelCase_ )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Path(lowerCamelCase_ ) / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] ) -> Any:
"""simple docstring"""
init_hf_modules()
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = dynamic_module_path / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : List[Any] = f.read()
# Imports of the form `import .xxx`
SCREAMING_SNAKE_CASE_ : Tuple = re.findall('^\s*import\s+\.(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall('^\s*from\s+\.(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Unique-ify
return list(set(lowerCamelCase_ ) )
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [module_file]
SCREAMING_SNAKE_CASE_ : Tuple = []
# Let's recurse through all relative imports
while not no_change:
SCREAMING_SNAKE_CASE_ : int = []
for f in files_to_check:
new_imports.extend(get_relative_imports(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ).parent
SCREAMING_SNAKE_CASE_ : int = [str(module_path / m ) for m in new_imports]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [f for f in new_import_files if f not in all_relative_imports]
SCREAMING_SNAKE_CASE_ : Any = [F'{f}.py' for f in new_import_files]
SCREAMING_SNAKE_CASE_ : Optional[int] = len(lowerCamelCase_ ) == 0
all_relative_imports.extend(lowerCamelCase_ )
return all_relative_imports
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] ) -> Any:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[int] = f.read()
# Imports of the form `import xxx`
SCREAMING_SNAKE_CASE_ : List[str] = re.findall('^\s*import\s+(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall('^\s*from\s+(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Only keep the top-level module
SCREAMING_SNAKE_CASE_ : List[str] = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
# Unique-ify and test we got them all
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(set(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : List[str] = []
for imp in imports:
try:
importlib.import_module(lowerCamelCase_ )
except ImportError:
missing_packages.append(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
raise ImportError(
'This modeling file requires the following packages that were not found in your environment: '
F'{", ".join(lowerCamelCase_ )}. Run `pip install {" ".join(lowerCamelCase_ )}`' )
return get_relative_imports(lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = module_path.replace(os.path.sep , '.' )
SCREAMING_SNAKE_CASE_ : Any = importlib.import_module(lowerCamelCase_ )
if class_name is None:
return find_pipeline_class(lowerCamelCase_ )
return getattr(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
from ..pipelines import DiffusionPipeline
SCREAMING_SNAKE_CASE_ : List[Any] = dict(inspect.getmembers(lowerCamelCase_ , inspect.isclass ) )
SCREAMING_SNAKE_CASE_ : List[str] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , lowerCamelCase_ )
and cls.__module__.split('.' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
F' {loaded_module}.' )
SCREAMING_SNAKE_CASE_ : Any = cls
return pipeline_class
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[Dict[str, str]] = None , lowerCamelCase_ : Optional[Union[bool, str]] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = False , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = str(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if os.path.isfile(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = module_file_or_url
SCREAMING_SNAKE_CASE_ : Dict = 'local'
elif pretrained_model_name_or_path.count('/' ) == 0:
SCREAMING_SNAKE_CASE_ : List[str] = get_diffusers_versions()
# cut ".dev0"
SCREAMING_SNAKE_CASE_ : Dict = 'v' + '.'.join(__version__.split('.' )[:3] )
# retrieve github version that matches
if revision is None:
SCREAMING_SNAKE_CASE_ : List[Any] = latest_version if latest_version[1:] in available_versions else 'main'
logger.info(F'Defaulting to latest_version: {revision}.' )
elif revision in available_versions:
SCREAMING_SNAKE_CASE_ : int = F'v{revision}'
elif revision == "main":
SCREAMING_SNAKE_CASE_ : List[Any] = revision
else:
raise ValueError(
F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
F' {", ".join(available_versions + ["main"] )}.' )
# community pipeline on GitHub
SCREAMING_SNAKE_CASE_ : Tuple = COMMUNITY_PIPELINES_URL.format(revision=lowerCamelCase_ , pipeline=lowerCamelCase_ )
try:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = cached_download(
lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Tuple = 'git'
SCREAMING_SNAKE_CASE_ : Dict = pretrained_model_name_or_path + '.py'
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
else:
try:
# Load from URL or cache if already cached
SCREAMING_SNAKE_CASE_ : List[str] = hf_hub_download(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) )
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
# Check we have all the requirements in our environment
SCREAMING_SNAKE_CASE_ : Dict = check_imports(lowerCamelCase_ )
# Now we move the module inside our cached dynamic modules.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = Path(lowerCamelCase_ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
for module_needed in modules_needed:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F'{module_needed}.py'
shutil.copy(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Tuple = use_auth_token
elif use_auth_token is True:
SCREAMING_SNAKE_CASE_ : int = HfFolder.get_token()
else:
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[Any] = model_info(lowerCamelCase_ , revision=lowerCamelCase_ , token=lowerCamelCase_ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
SCREAMING_SNAKE_CASE_ : Any = submodule_path / commit_hash
SCREAMING_SNAKE_CASE_ : List[Any] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(lowerCamelCase_ )
if not (submodule_path / module_file).exists():
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
lowerCamelCase_ , F'{module_needed}.py' , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return os.path.join(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[Dict[str, str]] = None , lowerCamelCase_ : Optional[Union[bool, str]] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = False , **lowerCamelCase_ : Dict , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = get_cached_module_file(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return get_class_in_module(lowerCamelCase_ , final_module.replace('.py' , '' ) )
| 685
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class A_ ( _a ):
def __init__( self: int ,*__lowerCAmelCase: Tuple ,**__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." ,__lowerCAmelCase ,)
super().__init__(*__lowerCAmelCase ,**__lowerCAmelCase )
| 46
|
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self :List[Any]):
"""simple docstring"""
_lowercase ={}
def UpperCamelCase__ ( self :str):
"""simple docstring"""
print(self.vertex)
for i in self.vertex:
print(snake_case, ' -> ', ' -> '.join([str(snake_case) for j in self.vertex[i]]))
def UpperCamelCase__ ( self :Any, snake_case :int, snake_case :int):
"""simple docstring"""
if from_vertex in self.vertex:
self.vertex[from_vertex].append(snake_case)
else:
# else make a new vertex
_lowercase =[to_vertex]
def UpperCamelCase__ ( self :Union[str, Any]):
"""simple docstring"""
_lowercase =[False] * len(self.vertex)
# call the recursive helper function
for i in range(len(self.vertex)):
if not visited[i]:
self.dfs_recursive(snake_case, snake_case)
def UpperCamelCase__ ( self :List[str], snake_case :int, snake_case :list):
"""simple docstring"""
_lowercase =True
print(snake_case, end=' ')
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(snake_case, snake_case)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 181
| 0
|
import argparse
import copy
def __A ( _A ):
"""simple docstring"""
__a = {}
with open(_A ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
__a = []
_list.append([line.split()[1], line.split()[2]] )
__a = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
__a = []
_list.append([line.split()[0], line.split()[2]] )
__a = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def __A ( _A , _A ):
"""simple docstring"""
with open(_A ) as f:
__a = f.read(1 )
__a = start_node
__a = []
__a = start_node
__a = 0
while visiting not in first_solution:
__a = 1_0000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(_A ) and k[0] not in first_solution:
__a = k[1]
__a = k[0]
first_solution.append(_A )
__a = distance_of_first_solution + int(_A )
__a = best_node
first_solution.append(_A )
__a = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
__a = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0000
)
return first_solution, distance_of_first_solution
def __A ( _A , _A ):
"""simple docstring"""
__a = []
for n in solution[1:-1]:
__a = solution.index(_A )
for kn in solution[1:-1]:
__a = solution.index(_A )
if n == kn:
continue
__a = copy.deepcopy(_A )
__a = kn
__a = n
__a = 0
for k in _tmp[:-1]:
__a = _tmp[_tmp.index(_A ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
__a = distance + int(i[1] )
_tmp.append(_A )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
__a = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda _A : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def __A ( _A , _A , _A , _A , _A ):
"""simple docstring"""
__a = 1
__a = first_solution
__a = []
__a = distance_of_first_solution
__a = solution
while count <= iters:
__a = find_neighborhood(_A , _A )
__a = 0
__a = neighborhood[index_of_best_solution]
__a = len(_A ) - 1
__a = False
while not found:
__a = 0
while i < len(_A ):
if best_solution[i] != solution[i]:
__a = best_solution[i]
__a = solution[i]
break
__a = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
__a = True
__a = best_solution[:-1]
__a = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
__a = cost
__a = solution
else:
__a = index_of_best_solution + 1
__a = neighborhood[index_of_best_solution]
if len(_A ) >= size:
tabu_list.pop(0 )
__a = count + 1
return best_solution_ever, best_cost
def __A ( _A=None ):
"""simple docstring"""
__a = generate_neighbours(args.File )
__a , __a = generate_first_solution(
args.File , _A )
__a , __a = tabu_search(
_A , _A , _A , args.Iterations , args.Size , )
print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 702
|
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
SCREAMING_SNAKE_CASE : str = {
"""169M""": 12,
"""430M""": 24,
"""1B5""": 24,
"""3B""": 32,
"""7B""": 32,
"""14B""": 40,
}
SCREAMING_SNAKE_CASE : Dict = {
"""169M""": 768,
"""430M""": 1024,
"""1B5""": 2048,
"""3B""": 2560,
"""7B""": 4096,
"""14B""": 5120,
}
def __A ( _A ):
"""simple docstring"""
__a = list(state_dict.keys() )
for name in state_dict_keys:
__a = state_dict.pop(_A )
# emb -> embedding
if name.startswith("emb." ):
__a = name.replace("emb." , "embeddings." )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("blocks.0.ln0" ):
__a = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
# att -> attention
__a = re.sub(r"blocks\.(\d+)\.att" , r"blocks.\1.attention" , _A )
# ffn -> feed_forward
__a = re.sub(r"blocks\.(\d+)\.ffn" , r"blocks.\1.feed_forward" , _A )
# time_mix_k -> time_mix_key and reshape
if name.endswith(".time_mix_k" ):
__a = name.replace(".time_mix_k" , ".time_mix_key" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(".time_mix_v" ):
__a = name.replace(".time_mix_v" , ".time_mix_value" )
# time_mix_r -> time_mix_key and reshape
if name.endswith(".time_mix_r" ):
__a = name.replace(".time_mix_r" , ".time_mix_receptance" )
if name != "head.weight":
__a = "rwkv." + name
__a = weight
return state_dict
def __A ( _A , _A , _A , _A=None , _A=None , _A=False , _A=None ):
"""simple docstring"""
if tokenizer_file is None:
print("No `--tokenizer_file` provided, we will use the default tokenizer." )
__a = 5_0277
__a = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
else:
__a = PreTrainedTokenizerFast(tokenizer_file=_A )
__a = len(_A )
tokenizer.save_pretrained(_A )
# 2. Build the config
__a = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
__a = candidate
break
if size is None:
raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
if size not in possible_sizes:
raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
__a = RwkvConfig(
vocab_size=_A , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(_A )
# 3. Download model file then convert state_dict
__a = hf_hub_download(_A , _A )
__a = torch.load(_A , map_location="cpu" )
__a = convert_state_dict(_A )
# 4. Split in shards and save
__a , __a = shard_checkpoint(_A )
for shard_file, shard in shards.items():
torch.save(_A , os.path.join(_A , _A ) )
if index is not None:
__a = os.path.join(_A , _A )
# Save the index as well
with open(_A , "w" , encoding="utf-8" ) as f:
__a = json.dumps(_A , indent=2 , sort_keys=_A ) + "\n"
f.write(_A )
# 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
print(
"Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model." )
__a = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
__a = torch.load(os.path.join(_A , _A ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(_A , _A ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("Please provide a `model_name` to push the model to the Hub." )
__a = AutoModelForCausalLM.from_pretrained(_A )
model.push_to_hub(_A , max_shard_size="2GB" )
tokenizer.push_to_hub(_A )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 525
| 0
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class _SCREAMING_SNAKE_CASE :
def __init__( self : Tuple , snake_case_ : Dict ):
"""simple docstring"""
A : Union[str, Any] = data
A : Optional[Any] = [0X67_452_301, 0Xef_cda_b89, 0X98_bad_cfe, 0X10_325_476, 0Xc3_d2e_1f0]
@staticmethod
def _UpperCAmelCase ( snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] ):
"""simple docstring"""
return ((n << b) | (n >> (32 - b))) & 0Xff_fff_fff
def _UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
A : List[str] = b'''\x80''' + b'''\x00''' * (63 - (len(self.data ) + 8) % 64)
A : Any = self.data + padding + struct.pack('''>Q''' , 8 * len(self.data ) )
return padded_data
def _UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def _UpperCAmelCase ( self : Any , snake_case_ : List[str] ):
"""simple docstring"""
A : List[str] = list(struct.unpack('''>16L''' , snake_case_ ) ) + [0] * 64
for i in range(16 , 80 ):
A : Tuple = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def _UpperCAmelCase ( self : int ):
"""simple docstring"""
A : List[Any] = self.padding()
A : str = self.split_blocks()
for block in self.blocks:
A : Any = self.expand_block(snake_case_ )
A , A , A , A , A : Tuple = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
A : Dict = (b & c) | ((~b) & d)
A : List[str] = 0X5a_827_999
elif 20 <= i < 40:
A : Dict = b ^ c ^ d
A : Any = 0X6e_d9e_ba1
elif 40 <= i < 60:
A : Tuple = (b & c) | (b & d) | (c & d)
A : str = 0X8f_1bb_cdc
elif 60 <= i < 80:
A : Union[str, Any] = b ^ c ^ d
A : List[str] = 0Xca_62c_1d6
A , A , A , A , A : Any = (
self.rotate(snake_case_ , 5 ) + f + e + k + expanded_block[i] & 0Xff_fff_fff,
a,
self.rotate(snake_case_ , 30 ),
c,
d,
)
A : Optional[int] = (
self.h[0] + a & 0Xff_fff_fff,
self.h[1] + b & 0Xff_fff_fff,
self.h[2] + c & 0Xff_fff_fff,
self.h[3] + d & 0Xff_fff_fff,
self.h[4] + e & 0Xff_fff_fff,
)
return ("{:08x}" * 5).format(*self.h )
def _lowerCamelCase ( ):
'''simple docstring'''
A : Union[str, Any] = B'''Test String'''
assert SHAaHash(lowerCamelCase_ ).final_hash() == hashlib.shaa(lowerCamelCase_ ).hexdigest() # noqa: S324
def _lowerCamelCase ( ):
'''simple docstring'''
A : Union[str, Any] = argparse.ArgumentParser(description='''Process some strings or files''' )
parser.add_argument(
'''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
parser.add_argument('''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
A : List[str] = parser.parse_args()
A : List[Any] = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , '''rb''' ) as f:
A : Optional[int] = f.read()
else:
A : Union[str, Any] = bytes(lowerCamelCase_ , '''utf-8''' )
print(SHAaHash(lowerCamelCase_ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 256
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCamelCase_ = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( snake_case ):
lowerCamelCase_ = 'upernet'
def __init__( self : Optional[int] , snake_case_ : Dict=None , snake_case_ : Any=512 , snake_case_ : str=0.02 , snake_case_ : Optional[int]=[1, 2, 3, 6] , snake_case_ : Union[str, Any]=True , snake_case_ : Optional[Any]=0.4 , snake_case_ : Dict=384 , snake_case_ : List[Any]=256 , snake_case_ : str=1 , snake_case_ : Dict=False , snake_case_ : str=255 , **snake_case_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(**snake_case_ )
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
A : List[str] = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
elif isinstance(snake_case_ , snake_case_ ):
A : Union[str, Any] = backbone_config.get('''model_type''' )
A : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
A : str = config_class.from_dict(snake_case_ )
A : List[str] = backbone_config
A : str = hidden_size
A : Any = initializer_range
A : str = pool_scales
A : List[str] = use_auxiliary_head
A : Optional[Any] = auxiliary_loss_weight
A : Tuple = auxiliary_in_channels
A : Optional[Any] = auxiliary_channels
A : int = auxiliary_num_convs
A : Any = auxiliary_concat_input
A : Union[str, Any] = loss_ignore_index
def _UpperCAmelCase ( self : str ):
"""simple docstring"""
A : Optional[Any] = copy.deepcopy(self.__dict__ )
A : str = self.backbone_config.to_dict()
A : List[str] = self.__class__.model_type
return output
| 256
| 1
|
"""simple docstring"""
from collections import Counter
from timeit import timeit
def lowerCAmelCase_( lowercase_ : str = "" , ) -> bool:
return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''' ).lower() ).values() ) < 2
def lowerCAmelCase_( lowercase_ : str = "" ) -> bool:
if len(lowercase_ ) == 0:
return True
_lowerCamelCase = input_str.replace(''' ''' , '''''' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_lowerCamelCase = {}
for character in lower_case_input_str:
_lowerCamelCase = character_freq_dict.get(lowercase_ , 0 ) + 1
_lowerCamelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def lowerCAmelCase_( lowercase_ : str = "" ) -> None:
print('''\nFor string = ''' , lowercase_ , ''':''' )
print(
'''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(lowercase_ ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
print(
'''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(lowercase_ ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Tuple = input(
'''Enter string to determine if it can be rearranged as a palindrome or not: '''
).strip()
benchmark(check_str)
__SCREAMING_SNAKE_CASE : int = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""")
| 623
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 623
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = tempfile.mkdtemp()
lowerCamelCase : int = BlipImageProcessor()
lowerCamelCase : Tuple = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
lowerCamelCase : int = BlipaProcessor(__A , __A )
processor.save_pretrained(self.tmpdirname )
def _snake_case ( self , **__A ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__A ).tokenizer
def _snake_case ( self , **__A ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__A ).image_processor
def _snake_case ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase : Optional[int] = [Image.fromarray(np.moveaxis(__A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase : Any = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCamelCase : str = self.get_image_processor(do_normalize=__A , padding_value=1.0 )
lowerCamelCase : Union[str, Any] = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = self.get_image_processor()
lowerCamelCase : str = self.get_tokenizer()
lowerCamelCase : Dict = BlipaProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : str = self.prepare_image_inputs()
lowerCamelCase : List[str] = image_processor(__A , return_tensors="np" )
lowerCamelCase : List[Any] = processor(images=__A , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = self.get_image_processor()
lowerCamelCase : int = self.get_tokenizer()
lowerCamelCase : Tuple = BlipaProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : List[Any] = "lower newer"
lowerCamelCase : Optional[Any] = processor(text=__A )
lowerCamelCase : str = tokenizer(__A , return_token_type_ids=__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = self.get_image_processor()
lowerCamelCase : int = self.get_tokenizer()
lowerCamelCase : Optional[int] = BlipaProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Optional[int] = "lower newer"
lowerCamelCase : Dict = self.prepare_image_inputs()
lowerCamelCase : List[str] = processor(text=__A , images=__A )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.get_image_processor()
lowerCamelCase : str = self.get_tokenizer()
lowerCamelCase : Optional[Any] = BlipaProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase : List[Any] = processor.batch_decode(__A )
lowerCamelCase : Tuple = tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.get_image_processor()
lowerCamelCase : str = self.get_tokenizer()
lowerCamelCase : Dict = BlipaProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Optional[Any] = "lower newer"
lowerCamelCase : Optional[int] = self.prepare_image_inputs()
lowerCamelCase : Dict = processor(text=__A , images=__A )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 340
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Tuple = ["image_processor", "tokenizer"]
__A : Any = "ViTImageProcessor"
__A : Dict = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , __A=None , __A=None , **__A ):
"""simple docstring"""
lowerCamelCase : str = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __A , )
lowerCamelCase : Optional[Any] = kwargs.pop("feature_extractor" )
lowerCamelCase : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__A , __A )
def __call__( self , __A=None , __A=None , __A=None , __A=None , **__A ):
"""simple docstring"""
if text is None and visual_prompt is None and images is None:
raise ValueError("You have to specify either text, visual prompt or images." )
if text is not None and visual_prompt is not None:
raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
if text is not None:
lowerCamelCase : Any = self.tokenizer(__A , return_tensors=__A , **__A )
if visual_prompt is not None:
lowerCamelCase : Optional[Any] = self.image_processor(__A , return_tensors=__A , **__A )
if images is not None:
lowerCamelCase : Optional[int] = self.image_processor(__A , return_tensors=__A , **__A )
if visual_prompt is not None and images is not None:
lowerCamelCase : Tuple = {
"pixel_values": image_features.pixel_values,
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
lowerCamelCase : List[str] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
lowerCamelCase : Optional[int] = {
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def _snake_case ( self , *__A , **__A ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__A , **__A )
def _snake_case ( self , *__A , **__A ):
"""simple docstring"""
return self.tokenizer.decode(*__A , **__A )
@property
def _snake_case ( self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __A , )
return self.image_processor_class
@property
def _snake_case ( self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __A , )
return self.image_processor
| 340
| 1
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self , a__ ) -> Any:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
A = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(a__ )
def _UpperCAmelCase ( self ) -> Tuple:
A = """sshleifer/tiny-gpt2"""
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , )
A = PyTorchBenchmark(a__ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Any:
A = """sgugger/tiny-distilbert-classification"""
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , only_pretrain_model=a__ , )
A = PyTorchBenchmark(a__ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
A = """sshleifer/tiny-gpt2"""
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , torchscript=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , )
A = PyTorchBenchmark(a__ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def _UpperCAmelCase ( self ) -> Tuple:
A = """sshleifer/tiny-gpt2"""
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , fpaa=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , )
A = PyTorchBenchmark(a__ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> str:
A = """sshleifer/tiny-gpt2"""
A = AutoConfig.from_pretrained(a__ )
# set architectures equal to `None`
A = None
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , )
A = PyTorchBenchmark(a__ , configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Dict:
A = """sshleifer/tiny-gpt2"""
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , )
A = PyTorchBenchmark(a__ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def _UpperCAmelCase ( self ) -> Dict:
A = """sshleifer/tiny-gpt2"""
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=a__ , multi_process=a__ , )
A = PyTorchBenchmark(a__ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> int:
A = """sshleifer/tiny-gpt2"""
A = AutoConfig.from_pretrained(a__ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , )
A = PyTorchBenchmark(a__ , configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[str]:
A = """sshleifer/tinier_bart"""
A = AutoConfig.from_pretrained(a__ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , )
A = PyTorchBenchmark(a__ , configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Dict:
A = """sshleifer/tiny-gpt2"""
A = AutoConfig.from_pretrained(a__ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , )
A = PyTorchBenchmark(a__ , configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Any:
A = """sshleifer/tinier_bart"""
A = AutoConfig.from_pretrained(a__ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a__ , )
A = PyTorchBenchmark(a__ , configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Optional[int]:
A = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , save_to_csv=a__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a__ , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(a__ , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(a__ , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(a__ , """train_time.csv""" ) , env_info_csv_file=os.path.join(a__ , """env.csv""" ) , multi_process=a__ , )
A = PyTorchBenchmark(a__ )
benchmark.run()
self.assertTrue(Path(os.path.join(a__ , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(a__ , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(a__ , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(a__ , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(a__ , """env.csv""" ) ).exists() )
def _UpperCAmelCase ( self ) -> Optional[int]:
A = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(a__ ):
self.assertTrue(hasattr(a__ , """sequential""" ) )
self.assertTrue(hasattr(a__ , """cumulative""" ) )
self.assertTrue(hasattr(a__ , """current""" ) )
self.assertTrue(hasattr(a__ , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=a__ , inference=a__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a__ , """log.txt""" ) , log_print=a__ , trace_memory_line_by_line=a__ , multi_process=a__ , )
A = PyTorchBenchmark(a__ )
A = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(a__ , """log.txt""" ) ).exists() )
| 546
|
def _lowerCAmelCase ( UpperCamelCase__: str ) -> str:
"""simple docstring"""
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 546
| 1
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__lowercase : int = {'''processing_wav2vec2_with_lm''': ['''Wav2Vec2ProcessorWithLM''']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
__lowercase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 36
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowerCamelCase : Optional[int] =field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowerCamelCase : bool =field(
default=a_ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
lowerCamelCase : bool =field(
default=a_ , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
lowerCamelCase : Optional[int] =field(
default=a_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCamelCase : Optional[int] =field(
default=a_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
lowerCamelCase : Optional[int] =field(
default=a_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowerCamelCase : str =field(
default=a_ , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCamelCase : str =field(
default=a_ , metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."} )
lowerCamelCase : Optional[str] =field(
default=a_ , metadata={"help": "Train language if it is different from the evaluation language."} )
lowerCamelCase : Optional[str] =field(
default=a_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCamelCase : Optional[str] =field(
default=a_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCamelCase : Optional[str] =field(
default=a_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowerCamelCase : Optional[bool] =field(
default=a_ , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , )
lowerCamelCase : bool =field(
default=a_ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
lowerCamelCase : str =field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCamelCase : bool =field(
default=a_ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
lowerCamelCase : bool =field(
default=a_ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def snake_case_ () -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCAmelCase : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : List[str] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_xnli""" , __A )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__lowerCAmelCase : int = training_args.get_process_log_level()
logger.setLevel(__A )
datasets.utils.logging.set_verbosity(__A )
transformers.utils.logging.set_verbosity(__A )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
__lowerCAmelCase : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCAmelCase : Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
__lowerCAmelCase : Union[str, Any] = load_dataset(
"""xnli""" , model_args.language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
__lowerCAmelCase : List[str] = load_dataset(
"""xnli""" , model_args.train_language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCAmelCase : Dict = train_dataset.features["""label"""].names
if training_args.do_eval:
__lowerCAmelCase : Tuple = load_dataset(
"""xnli""" , model_args.language , split="""validation""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCAmelCase : Optional[int] = eval_dataset.features["""label"""].names
if training_args.do_predict:
__lowerCAmelCase : List[Any] = load_dataset(
"""xnli""" , model_args.language , split="""test""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCAmelCase : Optional[Any] = predict_dataset.features["""label"""].names
# Labels
__lowerCAmelCase : Optional[int] = len(__A )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCAmelCase : int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__A , idalabel={str(__A ): label for i, label in enumerate(__A )} , labelaid={label: i for i, label in enumerate(__A )} , finetuning_task="""xnli""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__lowerCAmelCase : Dict = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
__lowerCAmelCase : List[str] = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__lowerCAmelCase : int = False
def preprocess_function(__A : List[str] ):
# Tokenize the texts
return tokenizer(
examples["""premise"""] , examples["""hypothesis"""] , padding=__A , max_length=data_args.max_seq_length , truncation=__A , )
if training_args.do_train:
if data_args.max_train_samples is not None:
__lowerCAmelCase : Union[str, Any] = min(len(__A ) , data_args.max_train_samples )
__lowerCAmelCase : List[str] = train_dataset.select(range(__A ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
__lowerCAmelCase : Dict = train_dataset.map(
__A , batched=__A , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on train dataset""" , )
# Log a few random samples from the training set:
for index in random.sample(range(len(__A ) ) , 3 ):
logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
__lowerCAmelCase : str = min(len(__A ) , data_args.max_eval_samples )
__lowerCAmelCase : Optional[int] = eval_dataset.select(range(__A ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
__lowerCAmelCase : List[Any] = eval_dataset.map(
__A , batched=__A , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on validation dataset""" , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
__lowerCAmelCase : Dict = min(len(__A ) , data_args.max_predict_samples )
__lowerCAmelCase : Optional[int] = predict_dataset.select(range(__A ) )
with training_args.main_process_first(desc="""prediction dataset map pre-processing""" ):
__lowerCAmelCase : Union[str, Any] = predict_dataset.map(
__A , batched=__A , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on prediction dataset""" , )
# Get the metric function
__lowerCAmelCase : Optional[Any] = evaluate.load("""xnli""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__A : EvalPrediction ):
__lowerCAmelCase : Optional[int] = p.predictions[0] if isinstance(p.predictions , __A ) else p.predictions
__lowerCAmelCase : Tuple = np.argmax(__A , axis=1 )
return metric.compute(predictions=__A , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__lowerCAmelCase : Dict = default_data_collator
elif training_args.fpaa:
__lowerCAmelCase : Union[str, Any] = DataCollatorWithPadding(__A , pad_to_multiple_of=8 )
else:
__lowerCAmelCase : Optional[int] = None
# Initialize our Trainer
__lowerCAmelCase : Optional[int] = Trainer(
model=__A , args=__A , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__A , tokenizer=__A , data_collator=__A , )
# Training
if training_args.do_train:
__lowerCAmelCase : Optional[int] = None
if training_args.resume_from_checkpoint is not None:
__lowerCAmelCase : int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowerCAmelCase : Tuple = last_checkpoint
__lowerCAmelCase : List[str] = trainer.train(resume_from_checkpoint=__A )
__lowerCAmelCase : int = train_result.metrics
__lowerCAmelCase : Optional[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__A )
)
__lowerCAmelCase : Optional[int] = min(__A , len(__A ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , __A )
trainer.save_metrics("""train""" , __A )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__lowerCAmelCase : List[str] = trainer.evaluate(eval_dataset=__A )
__lowerCAmelCase : List[str] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__A )
__lowerCAmelCase : List[Any] = min(__A , len(__A ) )
trainer.log_metrics("""eval""" , __A )
trainer.save_metrics("""eval""" , __A )
# Prediction
if training_args.do_predict:
logger.info("""*** Predict ***""" )
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : Any = trainer.predict(__A , metric_key_prefix="""predict""" )
__lowerCAmelCase : List[Any] = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(__A )
)
__lowerCAmelCase : str = min(__A , len(__A ) )
trainer.log_metrics("""predict""" , __A )
trainer.save_metrics("""predict""" , __A )
__lowerCAmelCase : Any = np.argmax(__A , axis=1 )
__lowerCAmelCase : int = os.path.join(training_args.output_dir , """predictions.txt""" )
if trainer.is_world_process_zero():
with open(__A , """w""" ) as writer:
writer.write("""index\tprediction\n""" )
for index, item in enumerate(__A ):
__lowerCAmelCase : List[Any] = label_list[item]
writer.write(f'''{index}\t{item}\n''' )
if __name__ == "__main__":
main()
| 651
| 0
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class _lowerCamelCase (lowerCamelCase ):
def __lowerCamelCase ( self ):
__snake_case = SMALL_MODEL_IDENTIFIER
__snake_case = 'pt'
__snake_case = 'tf'
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ ):
__snake_case = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(SCREAMING_SNAKE_CASE_ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ ):
__snake_case = TFAutoModel.from_pretrained(self.test_model , from_pt=SCREAMING_SNAKE_CASE_ )
model_tf.save_pretrained(SCREAMING_SNAKE_CASE_ )
def __lowerCamelCase ( self ):
__snake_case = 'mock_framework'
# Framework provided - return whatever the user provides
__snake_case = FeaturesManager.determine_framework(self.test_model , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(SCREAMING_SNAKE_CASE_ )
__snake_case = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(SCREAMING_SNAKE_CASE_ )
__snake_case = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __lowerCamelCase ( self ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(SCREAMING_SNAKE_CASE_ )
__snake_case = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(SCREAMING_SNAKE_CASE_ )
__snake_case = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
__snake_case = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE_ )
def __lowerCamelCase ( self ):
__snake_case = MagicMock(return_value=SCREAMING_SNAKE_CASE_ )
with patch('transformers.onnx.features.is_tf_available' , SCREAMING_SNAKE_CASE_ ):
__snake_case = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
__snake_case = MagicMock(return_value=SCREAMING_SNAKE_CASE_ )
with patch('transformers.onnx.features.is_torch_available' , SCREAMING_SNAKE_CASE_ ):
__snake_case = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.framework_tf )
# Both in environment -> use PyTorch
__snake_case = MagicMock(return_value=SCREAMING_SNAKE_CASE_ )
__snake_case = MagicMock(return_value=SCREAMING_SNAKE_CASE_ )
with patch('transformers.onnx.features.is_tf_available' , SCREAMING_SNAKE_CASE_ ), patch(
'transformers.onnx.features.is_torch_available' , SCREAMING_SNAKE_CASE_ ):
__snake_case = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.framework_pt )
# Both not in environment -> raise error
__snake_case = MagicMock(return_value=SCREAMING_SNAKE_CASE_ )
__snake_case = MagicMock(return_value=SCREAMING_SNAKE_CASE_ )
with patch('transformers.onnx.features.is_tf_available' , SCREAMING_SNAKE_CASE_ ), patch(
'transformers.onnx.features.is_torch_available' , SCREAMING_SNAKE_CASE_ ):
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
__snake_case = FeaturesManager.determine_framework(self.test_model )
| 345
|
import os
import sys
import unittest
lowerCamelCase_ : Union[str, Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
lowerCamelCase_ : Tuple = os.path.join(git_repo_path, "src", "diffusers")
class _lowerCamelCase (unittest.TestCase ):
def __lowerCamelCase ( self ):
__snake_case = find_backend(' if not is_torch_available():' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , 'torch' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__snake_case = find_backend(' if not (is_torch_available() and is_transformers_available()):' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , 'torch_and_transformers' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__snake_case = find_backend(
' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , 'torch_and_transformers_and_onnx' )
def __lowerCamelCase ( self ):
__snake_case = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch' , SCREAMING_SNAKE_CASE_ )
self.assertIn('torch_and_transformers' , SCREAMING_SNAKE_CASE_ )
self.assertIn('flax_and_transformers' , SCREAMING_SNAKE_CASE_ )
self.assertIn('torch_and_transformers_and_onnx' , SCREAMING_SNAKE_CASE_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('UNet2DModel' , objects['torch'] )
self.assertIn('FlaxUNet2DConditionModel' , objects['flax'] )
self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers'] )
self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers'] )
self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy'] )
self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx'] )
def __lowerCamelCase ( self ):
__snake_case = create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '\nCONSTANT = None\n' )
__snake_case = create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
SCREAMING_SNAKE_CASE_ , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
__snake_case = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n'
__snake_case = create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __lowerCamelCase ( self ):
__snake_case = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n'
__snake_case = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , SCREAMING_SNAKE_CASE_ )
| 345
| 1
|
from __future__ import annotations
import queue
class _lowercase :
def __init__( self : List[Any] , lowerCamelCase__ : int ) -> Dict:
"""simple docstring"""
A_ = data
A_ = None
A_ = None
def _lowerCamelCase ( ):
'''simple docstring'''
print('''\n********Press N to stop entering at any point of time********\n''' )
A_ = input('''Enter the value of the root node: ''' ).strip().lower()
A_ = queue.Queue()
A_ = TreeNode(int(__a ) )
q.put(__a )
while not q.empty():
A_ = q.get()
A_ = f"Enter the left node of {node_found.data}: "
A_ = input(__a ).strip().lower() or '''n'''
if check == "n":
return tree_node
A_ = TreeNode(int(__a ) )
A_ = left_node
q.put(__a )
A_ = f"Enter the right node of {node_found.data}: "
A_ = input(__a ).strip().lower() or '''n'''
if check == "n":
return tree_node
A_ = TreeNode(int(__a ) )
A_ = right_node
q.put(__a )
raise
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not isinstance(__a , __a ) or not node:
return
print(node.data , end=''',''' )
pre_order(node.left )
pre_order(node.right )
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not isinstance(__a , __a ) or not node:
return
in_order(node.left )
print(node.data , end=''',''' )
in_order(node.right )
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not isinstance(__a , __a ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=''',''' )
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not isinstance(__a , __a ) or not node:
return
A_ = queue.Queue()
q.put(__a )
while not q.empty():
A_ = q.get()
print(node_dequeued.data , end=''',''' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not isinstance(__a , __a ) or not node:
return
A_ = queue.Queue()
q.put(__a )
while not q.empty():
A_ = []
while not q.empty():
A_ = q.get()
print(node_dequeued.data , end=''',''' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(__a )
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not isinstance(__a , __a ) or not node:
return
A_ = []
A_ = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=''',''' )
stack.append(__a )
A_ = n.left
# end of while means current node doesn't have left child
A_ = stack.pop()
# start to traverse its right child
A_ = n.right
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not isinstance(__a , __a ) or not node:
return
A_ = []
A_ = node
while n or stack:
while n:
stack.append(__a )
A_ = n.left
A_ = stack.pop()
print(n.data , end=''',''' )
A_ = n.right
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not isinstance(__a , __a ) or not node:
return
A_ ,A_ = [], []
A_ = node
stacka.append(__a )
while stacka: # to find the reversed order of post order, store it in stack2
A_ = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(__a )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=''',''' )
def _lowerCamelCase ( SCREAMING_SNAKE_CASE = "" , SCREAMING_SNAKE_CASE=50 , SCREAMING_SNAKE_CASE="*" ):
'''simple docstring'''
if not s:
return "\n" + width * char
A_ ,A_ = divmod(width - len(__a ) - 2 , 2 )
return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
__lowercase = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 50 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 203
|
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
_lowerCAmelCase: Tuple = get_logger(__name__)
_lowerCAmelCase: List[str] = Path(__file__).parent / 'model_card_template.md'
_lowerCAmelCase: Any = uuida().hex
_lowerCAmelCase: List[Any] = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
_lowerCAmelCase: int = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
_lowerCAmelCase: Tuple = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def _lowercase( __a : Union[Dict, str, None] = None ):
a__ =f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f"""; torch/{_torch_version}"""
if is_flax_available():
ua += f"""; jax/{_jax_version}"""
ua += f"""; flax/{_flax_version}"""
if is_onnx_available():
ua += f"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('DIFFUSERS_IS_CI' , '' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(__a , __a ):
ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items() )
elif isinstance(__a , __a ):
ua += "; " + user_agent
return ua
def _lowercase( __a : str , __a : Optional[str] = None , __a : Optional[str] = None ):
if token is None:
a__ =HfFolder.get_token()
if organization is None:
a__ =whoami(__a )['name']
return f"""{username}/{model_id}"""
else:
return f"""{organization}/{model_id}"""
def _lowercase( __a : Union[str, Any] , __a : Dict ):
if not is_jinja_available():
raise ValueError(
'Modelcard rendering is based on Jinja templates.'
' Please make sure to have `jinja` installed before using `create_model_card`.'
' To install it, please run `pip install Jinja2`.' )
if hasattr(__a , 'local_rank' ) and args.local_rank not in [-1, 0]:
return
a__ =args.hub_token if hasattr(__a , 'hub_token' ) else None
a__ =get_full_repo_name(__a , token=__a )
a__ =ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='en' , license='apache-2.0' , library_name='diffusers' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=__a , model_name=__a , repo_name=__a , dataset_name=args.dataset_name if hasattr(__a , 'dataset_name' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(__a , 'gradient_accumulation_steps' ) else None
) , adam_betaa=args.adam_betaa if hasattr(__a , 'adam_beta1' ) else None , adam_betaa=args.adam_betaa if hasattr(__a , 'adam_beta2' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(__a , 'adam_weight_decay' ) else None , adam_epsilon=args.adam_epsilon if hasattr(__a , 'adam_epsilon' ) else None , lr_scheduler=args.lr_scheduler if hasattr(__a , 'lr_scheduler' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(__a , 'lr_warmup_steps' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(__a , 'ema_inv_gamma' ) else None , ema_power=args.ema_power if hasattr(__a , 'ema_power' ) else None , ema_max_decay=args.ema_max_decay if hasattr(__a , 'ema_max_decay' ) else None , mixed_precision=args.mixed_precision , )
a__ =os.path.join(args.output_dir , 'README.md' )
model_card.save(__a )
def _lowercase( __a : Optional[str] , __a : Optional[str] = None ):
if resolved_file is None or commit_hash is not None:
return commit_hash
a__ =str(Path(__a ).as_posix() )
a__ =re.search(r'snapshots/([^/]+)/' , __a )
if search is None:
return None
a__ =search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(__a ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
_lowerCAmelCase: List[str] = os.path.expanduser(
os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
_lowerCAmelCase: List[str] = os.path.join(hf_cache_home, 'diffusers')
def _lowercase( __a : Optional[str] = None , __a : Optional[str] = None ):
if new_cache_dir is None:
a__ =DIFFUSERS_CACHE
if old_cache_dir is None:
a__ =old_diffusers_cache
a__ =Path(__a ).expanduser()
a__ =Path(__a ).expanduser()
for old_blob_path in old_cache_dir.glob('**/blobs/*' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
a__ =new_cache_dir / old_blob_path.relative_to(__a )
new_blob_path.parent.mkdir(parents=__a , exist_ok=__a )
os.replace(__a , __a )
try:
os.symlink(__a , __a )
except OSError:
logger.warning(
'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
_lowerCAmelCase: Dict = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
_lowerCAmelCase: int = 0
else:
with open(cache_version_file) as f:
try:
_lowerCAmelCase: List[Any] = int(f.read())
except ValueError:
_lowerCAmelCase: Any = 0
if cache_version < 1:
_lowerCAmelCase: str = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
_lowerCAmelCase: Optional[Any] = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'the directory exists and can be written to.'
)
def _lowercase( __a : str , __a : Optional[str] = None ):
if variant is not None:
a__ =weights_name.split('.' )
a__ =splits[:-1] + [variant] + splits[-1:]
a__ ='.'.join(__a )
return weights_name
def _lowercase( __a : Union[str, Any] , *,
__a : Optional[Any] , __a : Optional[Any] , __a : List[Any] , __a : Tuple , __a : Optional[Any] , __a : Dict , __a : str , __a : int , __a : Tuple , __a : Union[str, Any] , __a : int=None , ):
a__ =str(__a )
if os.path.isfile(__a ):
return pretrained_model_name_or_path
elif os.path.isdir(__a ):
if os.path.isfile(os.path.join(__a , __a ) ):
# Load from a PyTorch checkpoint
a__ =os.path.join(__a , __a )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(__a , __a , __a ) ):
a__ =os.path.join(__a , __a , __a )
return model_file
else:
raise EnvironmentError(
f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__a ).base_version ) >= version.parse('0.20.0' )
):
try:
a__ =hf_hub_download(
__a , filename=_add_variant(__a , __a ) , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , user_agent=__a , subfolder=__a , revision=revision or commit_hash , )
warnings.warn(
f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , __a , )
return model_file
except: # noqa: E722
warnings.warn(
f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__a , __a )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(__a , __a )}' so that the correct variant file can be added.""" , __a , )
try:
# 2. Load model file as usual
a__ =hf_hub_download(
__a , filename=__a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , user_agent=__a , subfolder=__a , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'this model name. Check the model page at '
f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" )
| 20
| 0
|
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 712
|
'''simple docstring'''
import warnings
warnings.warn(
'memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
| 666
| 0
|
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class A_ :
"""simple docstring"""
def __init__( self : str ,__A : Union[str, Any] ,__A : Any=13 ,__A : Union[str, Any]=7 ,__A : List[Any]=True ,__A : int=True ,__A : Optional[Any]=False ,__A : Union[str, Any]=True ,__A : Union[str, Any]=99 ,__A : Any=32 ,__A : Tuple=5 ,__A : Tuple=4 ,__A : Union[str, Any]=37 ,__A : str="gelu" ,__A : Optional[int]=0.1 ,__A : Dict=0.1 ,__A : Any=512 ,__A : Optional[Any]=16 ,__A : Any=2 ,__A : List[str]=0.02 ,__A : Dict=3 ,__A : Any=4 ,__A : int=None ,) -> Dict:
_lowercase = parent
_lowercase = batch_size
_lowercase = seq_length
_lowercase = is_training
_lowercase = use_input_mask
_lowercase = use_token_type_ids
_lowercase = use_labels
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_act
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = type_vocab_size
_lowercase = type_sequence_label_size
_lowercase = initializer_range
_lowercase = num_labels
_lowercase = num_choices
_lowercase = scope
def __UpperCAmelCase ( self : List[Any] ) -> str:
_lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowercase = None
if self.use_input_mask:
_lowercase = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase = None
if self.use_token_type_ids:
_lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_lowercase = None
_lowercase = None
_lowercase = None
if self.use_labels:
_lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_lowercase = ids_tensor([self.batch_size] ,self.num_choices )
_lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
return BioGptConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__A ,initializer_range=self.initializer_range ,)
def __UpperCAmelCase ( self : Optional[Any] ,__A : List[Any] ,__A : Optional[int] ,__A : Optional[Any] ,__A : str ,__A : int ,__A : int ,__A : Optional[Any] ) -> int:
_lowercase = BioGptModel(config=__A )
model.to(__A )
model.eval()
_lowercase = model(__A ,attention_mask=__A )
_lowercase = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : List[str] ,__A : Optional[Any] ,__A : Dict ,__A : Dict ,__A : Union[str, Any] ,__A : str ,__A : Any ,__A : str ,__A : List[Any] ,__A : List[Any] ,) -> Tuple:
_lowercase = BioGptForCausalLM(config=__A )
model.to(__A )
model.eval()
_lowercase = model(__A ,attention_mask=__A ,token_type_ids=__A ,labels=__A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self : Optional[int] ,__A : str ,__A : int ,__A : int ,__A : int ,__A : List[Any] ,*__A : Union[str, Any] ) -> Tuple:
_lowercase = BioGptModel(config=__A )
model.to(__A )
model.eval()
# create attention mask
_lowercase = torch.ones(input_ids.shape ,dtype=torch.long ,device=__A )
_lowercase = self.seq_length // 2
_lowercase = 0
# first forward pass
_lowercase , _lowercase = model(__A ,attention_mask=__A ).to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowercase = ids_tensor((self.batch_size, 1) ,config.vocab_size )
# change a random masked slice from input_ids
_lowercase = ids_tensor((1,) ,__A ).item() + 1
_lowercase = ids_tensor((self.batch_size, 1) ,config.vocab_size ).squeeze(-1 )
_lowercase = random_other_next_tokens
# append to next input_ids and attn_mask
_lowercase = torch.cat([input_ids, next_tokens] ,dim=-1 )
_lowercase = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) ,dtype=torch.long ,device=__A )] ,dim=1 ,)
# get two different outputs
_lowercase = model(__A ,attention_mask=__A )['last_hidden_state']
_lowercase = model(__A ,past_key_values=__A ,attention_mask=__A )['last_hidden_state']
# select random slice
_lowercase = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
_lowercase = output_from_no_past[:, -1, random_slice_idx].detach()
_lowercase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A ,__A ,atol=1e-3 ) )
def __UpperCAmelCase ( self : Tuple ,__A : List[str] ,__A : Any ,__A : List[Any] ,__A : Dict ,__A : Optional[int] ,*__A : List[Any] ) -> List[str]:
_lowercase = BioGptModel(config=__A ).to(__A ).eval()
_lowercase = torch.ones(input_ids.shape ,dtype=torch.long ,device=__A )
# first forward pass
_lowercase = model(__A ,attention_mask=__A ,use_cache=__A )
_lowercase , _lowercase = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
_lowercase = ids_tensor((self.batch_size, 3) ,config.vocab_size )
_lowercase = ids_tensor((self.batch_size, 3) ,2 )
# append to next input_ids and
_lowercase = torch.cat([input_ids, next_tokens] ,dim=-1 )
_lowercase = torch.cat([attention_mask, next_attn_mask] ,dim=-1 )
_lowercase = model(__A ,attention_mask=__A )['last_hidden_state']
_lowercase = model(__A ,attention_mask=__A ,past_key_values=__A )[
'last_hidden_state'
]
# select random slice
_lowercase = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
_lowercase = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowercase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A ,__A ,atol=1e-3 ) )
def __UpperCAmelCase ( self : Any ,__A : List[str] ,__A : int ,__A : List[str] ,__A : List[str] ,__A : List[Any] ,*__A : Any ,__A : Union[str, Any]=False ) -> Any:
_lowercase = BioGptForCausalLM(__A )
model.to(__A )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
_lowercase = model(__A ,labels=__A )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def __UpperCAmelCase ( self : Tuple ,__A : str ,*__A : Tuple ) -> List[str]:
_lowercase = BioGptModel(__A )
_lowercase = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) ,0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) ,0.01 )
def __UpperCAmelCase ( self : List[Any] ,__A : Union[str, Any] ,__A : Optional[Any] ,__A : Any ,__A : Union[str, Any] ,__A : Dict ,*__A : Any ) -> int:
_lowercase = self.num_labels
_lowercase = BioGptForTokenClassification(__A )
model.to(__A )
model.eval()
_lowercase = model(__A ,attention_mask=__A ,token_type_ids=__A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self : Any ) -> Optional[int]:
_lowercase = self.prepare_config_and_inputs()
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) = config_and_inputs
_lowercase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A_ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (BioGptForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : List[Any] = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
_lowercase = BioGptModelTester(self )
_lowercase = ConfigTester(self ,config_class=__A ,hidden_size=37 )
def __UpperCAmelCase ( self : str ) -> Any:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : str ) -> str:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
_lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowercase = type
self.model_tester.create_and_check_model(*__A )
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__A )
def __UpperCAmelCase ( self : Any ) -> List[Any]:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*__A ,gradient_checkpointing=__A )
def __UpperCAmelCase ( self : Any ) -> Tuple:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__A )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*__A )
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*__A )
@slow
def __UpperCAmelCase ( self : Any ) -> Optional[int]:
_lowercase = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(__A )
_lowercase = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
_lowercase = 'left'
# Define PAD Token = EOS Token = 50256
_lowercase = tokenizer.eos_token
_lowercase = model.config.eos_token_id
# use different length sentences to test batching
_lowercase = [
'Hello, my dog is a little',
'Today, I',
]
_lowercase = tokenizer(__A ,return_tensors='pt' ,padding=__A )
_lowercase = inputs['input_ids'].to(__A )
_lowercase = model.generate(
input_ids=__A ,attention_mask=inputs['attention_mask'].to(__A ) ,)
_lowercase = tokenizer(sentences[0] ,return_tensors='pt' ).input_ids.to(__A )
_lowercase = model.generate(input_ids=__A )
_lowercase = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
_lowercase = tokenizer(sentences[1] ,return_tensors='pt' ).input_ids.to(__A )
_lowercase = model.generate(input_ids=__A ,max_length=model.config.max_length - num_paddings )
_lowercase = tokenizer.batch_decode(__A ,skip_special_tokens=__A )
_lowercase = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=__A )
_lowercase = tokenizer.decode(output_padded[0] ,skip_special_tokens=__A )
_lowercase = [
'Hello, my dog is a little bit bigger than a little bit.',
'Today, I have a good idea of how to use the information',
]
self.assertListEqual(__A ,__A )
self.assertListEqual(__A ,[non_padded_sentence, padded_sentence] )
@slow
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase = BioGptModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def __UpperCAmelCase ( self : int ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = 3
_lowercase = input_dict['input_ids']
_lowercase = input_ids.ne(1 ).to(__A )
_lowercase = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
_lowercase = BioGptForSequenceClassification(__A )
model.to(__A )
model.eval()
_lowercase = model(__A ,attention_mask=__A ,labels=__A )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : Optional[int] ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = 3
_lowercase = 'multi_label_classification'
_lowercase = input_dict['input_ids']
_lowercase = input_ids.ne(1 ).to(__A )
_lowercase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
_lowercase = BioGptForSequenceClassification(__A )
model.to(__A )
model.eval()
_lowercase = model(__A ,attention_mask=__A ,labels=__A )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class A_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
_lowercase = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
_lowercase = torch.tensor([[2, 4805, 9, 656, 21]] )
_lowercase = model(__A )[0]
_lowercase = 4_2384
_lowercase = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape ,__A )
_lowercase = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__A ,atol=1e-4 ) )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
_lowercase = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
_lowercase = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(__A )
torch.manual_seed(0 )
_lowercase = tokenizer('COVID-19 is' ,return_tensors='pt' ).to(__A )
_lowercase = model.generate(
**__A ,min_length=100 ,max_length=1024 ,num_beams=5 ,early_stopping=__A ,)
_lowercase = tokenizer.decode(output_ids[0] ,skip_special_tokens=__A )
_lowercase = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
self.assertEqual(__A ,__A )
| 67
|
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError('check_bouncy() accepts only integer arguments' )
_A = str(_SCREAMING_SNAKE_CASE )
_A = ''.join(sorted(_SCREAMING_SNAKE_CASE ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE = 99 ) -> int:
"""simple docstring"""
if not 0 < percent < 100:
raise ValueError('solution() only accepts values from 0 to 100' )
_A = 0
_A = 1
while True:
if check_bouncy(_SCREAMING_SNAKE_CASE ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"{solution(99)}")
| 27
| 0
|
import math
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
if initial_intensity < 0:
raise ValueError("The value of intensity cannot be negative" )
# handling of negative values of initial intensity
if angle < 0 or angle > 3_6_0:
raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(__lowerCamelCase ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
| 203
|
from maths.prime_check import is_prime
def lowerCAmelCase_ ( __lowerCamelCase ):
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
__snake_case : List[str] = F'Input value of [number={number}] must be an integer'
raise TypeError(__lowerCamelCase )
if is_prime(__lowerCamelCase ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 203
| 1
|
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
__lowerCamelCase = sys.version_info >= (3, 10)
def a ( __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : Union[str, Any]=None ) -> List[Any]:
return field(default_factory=lambda: default , metadata=__lowerCAmelCase )
@dataclass
class __A :
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
@dataclass
class __A :
UpperCAmelCase__ = 4_2
UpperCAmelCase__ = field(default="toto" ,metadata={"help": "help message"} )
@dataclass
class __A :
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = None
class __A ( UpperCamelCase_ ):
UpperCAmelCase__ = """titi"""
UpperCAmelCase__ = """toto"""
class __A ( UpperCamelCase_ ):
UpperCAmelCase__ = """titi"""
UpperCAmelCase__ = """toto"""
UpperCAmelCase__ = 4_2
@dataclass
class __A :
UpperCAmelCase__ = "toto"
def lowerCamelCase__ ( self : List[str] ) -> Union[str, Any]:
__magic_name__: Optional[int] = BasicEnum(self.foo )
@dataclass
class __A :
UpperCAmelCase__ = "toto"
def lowerCamelCase__ ( self : str ) -> Dict:
__magic_name__: str = MixedTypeEnum(self.foo )
@dataclass
class __A :
UpperCAmelCase__ = None
UpperCAmelCase__ = field(default=UpperCamelCase_ ,metadata={"help": "help message"} )
UpperCAmelCase__ = None
UpperCAmelCase__ = list_field(default=[] )
UpperCAmelCase__ = list_field(default=[] )
@dataclass
class __A :
UpperCAmelCase__ = list_field(default=[] )
UpperCAmelCase__ = list_field(default=[1, 2, 3] )
UpperCAmelCase__ = list_field(default=["Hallo", "Bonjour", "Hello"] )
UpperCAmelCase__ = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class __A :
UpperCAmelCase__ = field()
UpperCAmelCase__ = field()
UpperCAmelCase__ = field()
def lowerCamelCase__ ( self : str ) -> str:
__magic_name__: Dict = BasicEnum(self.required_enum )
@dataclass
class __A :
UpperCAmelCase__ = 42
UpperCAmelCase__ = field()
UpperCAmelCase__ = None
UpperCAmelCase__ = field(default="toto" ,metadata={"help": "help message"} )
UpperCAmelCase__ = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
class __A :
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = None
@dataclass
class __A :
UpperCAmelCase__ = None
UpperCAmelCase__ = field(default=UpperCamelCase_ ,metadata={"help": "help message"} )
UpperCAmelCase__ = None
UpperCAmelCase__ = list_field(default=[] )
UpperCAmelCase__ = list_field(default=[] )
class __A ( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : str ) -> Dict:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
__magic_name__: Any = {k: v for k, v in vars(_a ).items() if k != """container"""}
__magic_name__: Optional[Any] = {k: v for k, v in vars(_a ).items() if k != """container"""}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("""choices""" , _a ) and yy.get("""choices""" , _a ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](_a ) , yy["""type"""](_a ) )
del xx["type"], yy["type"]
self.assertEqual(_a , _a )
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
__magic_name__: Any = HfArgumentParser(_a )
__magic_name__: Dict = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=_a , required=_a )
expected.add_argument("""--bar""" , type=_a , required=_a )
expected.add_argument("""--baz""" , type=_a , required=_a )
expected.add_argument("""--flag""" , type=_a , default=_a , const=_a , nargs="""?""" )
self.argparsersEqual(_a , _a )
__magic_name__: Dict = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
(__magic_name__ ): Optional[int] = parser.parse_args_into_dataclasses(_a , look_for_args_file=_a )
self.assertFalse(example.flag )
def lowerCamelCase__ ( self : List[str] ) -> Union[str, Any]:
__magic_name__: List[Any] = HfArgumentParser(_a )
__magic_name__: str = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=4_2 , type=_a )
expected.add_argument("""--baz""" , default="""toto""" , type=_a , help="""help message""" )
self.argparsersEqual(_a , _a )
def lowerCamelCase__ ( self : Dict ) -> Any:
__magic_name__: Optional[Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=_a , default=_a , const=_a , nargs="""?""" )
expected.add_argument("""--baz""" , type=_a , default=_a , const=_a , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=_a , dest="""baz""" )
expected.add_argument("""--opt""" , type=_a , default=_a )
__magic_name__: Tuple = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_a )
for dataclass_type in dataclass_types:
__magic_name__: List[Any] = HfArgumentParser(_a )
self.argparsersEqual(_a , _a )
__magic_name__: Any = parser.parse_args([] )
self.assertEqual(_a , Namespace(foo=_a , baz=_a , opt=_a ) )
__magic_name__: Any = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(_a , Namespace(foo=_a , baz=_a , opt=_a ) )
__magic_name__: Union[str, Any] = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(_a , Namespace(foo=_a , baz=_a , opt=_a ) )
__magic_name__: Optional[int] = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(_a , Namespace(foo=_a , baz=_a , opt=_a ) )
__magic_name__: int = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(_a , Namespace(foo=_a , baz=_a , opt=_a ) )
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
__magic_name__: Optional[int] = HfArgumentParser(_a )
__magic_name__: int = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 4_2] , type=make_choice_type_function(["""titi""", """toto""", 4_2] ) , )
self.argparsersEqual(_a , _a )
__magic_name__: Tuple = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
__magic_name__: int = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
__magic_name__: List[Any] = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
__magic_name__: Tuple = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
__magic_name__: Dict = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 4_2 )
__magic_name__: List[str] = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def lowerCamelCase__ ( self : List[str] ) -> Union[str, Any]:
@dataclass
class __A :
UpperCAmelCase__ = "toto"
__magic_name__: str = HfArgumentParser(_a )
__magic_name__: Any = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 4_2) , type=make_choice_type_function(["""titi""", """toto""", 4_2] ) , )
self.argparsersEqual(_a , _a )
__magic_name__: Optional[int] = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
__magic_name__: str = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
__magic_name__: Any = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 4_2 )
def lowerCamelCase__ ( self : int ) -> Optional[int]:
__magic_name__: Dict = HfArgumentParser(_a )
__magic_name__: int = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=_a )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=_a )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=_a )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=_a )
self.argparsersEqual(_a , _a )
__magic_name__: str = parser.parse_args([] )
self.assertEqual(
_a , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
__magic_name__: Union[str, Any] = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(_a , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def lowerCamelCase__ ( self : Any ) -> Optional[int]:
__magic_name__: Union[str, Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=_a , type=_a )
expected.add_argument("""--bar""" , default=_a , type=_a , help="""help message""" )
expected.add_argument("""--baz""" , default=_a , type=_a )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=_a )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=_a )
__magic_name__: Union[str, Any] = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_a )
for dataclass_type in dataclass_types:
__magic_name__: Dict = HfArgumentParser(_a )
self.argparsersEqual(_a , _a )
__magic_name__: List[Any] = parser.parse_args([] )
self.assertEqual(_a , Namespace(foo=_a , bar=_a , baz=_a , ces=[] , des=[] ) )
__magic_name__: List[str] = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(_a , Namespace(foo=1_2 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def lowerCamelCase__ ( self : int ) -> Any:
__magic_name__: List[Any] = HfArgumentParser(_a )
__magic_name__: Optional[int] = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=_a , required=_a )
expected.add_argument("""--required_str""" , type=_a , required=_a )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=_a , )
self.argparsersEqual(_a , _a )
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
__magic_name__: Optional[Any] = HfArgumentParser(_a )
__magic_name__: Optional[int] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=_a , required=_a )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=_a , )
expected.add_argument("""--opt""" , type=_a , default=_a )
expected.add_argument("""--baz""" , default="""toto""" , type=_a , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=_a )
self.argparsersEqual(_a , _a )
def lowerCamelCase__ ( self : Dict ) -> Dict:
__magic_name__: Dict = HfArgumentParser(_a )
__magic_name__: Union[str, Any] = {
"""foo""": 1_2,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
__magic_name__: List[Any] = parser.parse_dict(_a )[0]
__magic_name__: str = BasicExample(**_a )
self.assertEqual(_a , _a )
def lowerCamelCase__ ( self : Dict ) -> Optional[Any]:
__magic_name__: Optional[Any] = HfArgumentParser(_a )
__magic_name__: str = {
"""foo""": 1_2,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
"""extra""": 4_2,
}
self.assertRaises(_a , parser.parse_dict , _a , allow_extra_keys=_a )
def lowerCamelCase__ ( self : Optional[int] ) -> int:
__magic_name__: Optional[int] = HfArgumentParser(_a )
__magic_name__: Any = {
"""foo""": 1_2,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__: str = os.path.join(_a , """temp_json""" )
os.mkdir(_a )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(_a , _a )
__magic_name__: Union[str, Any] = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0]
__magic_name__: Optional[Any] = BasicExample(**_a )
self.assertEqual(_a , _a )
def lowerCamelCase__ ( self : str ) -> List[str]:
__magic_name__: Optional[Any] = HfArgumentParser(_a )
__magic_name__: Any = {
"""foo""": 1_2,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__: int = os.path.join(_a , """temp_yaml""" )
os.mkdir(_a )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(_a , _a )
__magic_name__: List[Any] = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
__magic_name__: int = BasicExample(**_a )
self.assertEqual(_a , _a )
def lowerCamelCase__ ( self : Optional[int] ) -> Dict:
__magic_name__: str = HfArgumentParser(_a )
self.assertIsNotNone(_a )
| 96
|
"""simple docstring"""
from math import factorial
def _lowercase ( __lowerCAmelCase = 100 ) -> int:
return sum(int(__lowerCAmelCase ) for x in str(factorial(__lowerCAmelCase ) ) )
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 680
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' )
__lowerCAmelCase : Any = tf.convert_to_tensor(
[[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
__lowerCAmelCase : List[Any] = model(lowerCAmelCase__ )["last_hidden_state"]
__lowerCAmelCase : Optional[Any] = tf.TensorShape((1, 10, 7_68) )
self.assertEqual(output.shape , lowerCAmelCase__ )
# compare the actual values for a slice.
__lowerCAmelCase : str = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 709
|
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class A__ :
def __init__( self ):
__lowerCAmelCase : Any = {}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = {}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if nodea not in self.connections:
self.add_node(_SCREAMING_SNAKE_CASE )
if nodea not in self.connections:
self.add_node(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = probability
def __lowerCamelCase ( self ):
return list(self.connections )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = 0
__lowerCAmelCase : List[Any] = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Optional[Any] = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : str = Counter(graph.get_nodes() )
__lowerCAmelCase : Tuple = start
for _ in range(_UpperCamelCase ):
__lowerCAmelCase : int = graph.transition(_UpperCamelCase )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 549
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase ={"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase =["ViTFeatureExtractor"]
lowerCamelCase =["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase =[
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase =[
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase =[
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
lowerCamelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 285
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : int = tempfile.mkdtemp()
UpperCamelCase__ : str = BlipImageProcessor()
UpperCamelCase__ : List[Any] = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
UpperCamelCase__ : List[Any] = BlipaProcessor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self , **__SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ).tokenizer
def __SCREAMING_SNAKE_CASE ( self , **__SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ).image_processor
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
UpperCamelCase__ : Dict = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : str = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCamelCase__ : str = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
UpperCamelCase__ : Optional[Any] = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Dict = self.get_image_processor()
UpperCamelCase__ : Tuple = self.get_tokenizer()
UpperCamelCase__ : int = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = self.prepare_image_inputs()
UpperCamelCase__ : Optional[Any] = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
UpperCamelCase__ : List[Any] = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : str = self.get_image_processor()
UpperCamelCase__ : Any = self.get_tokenizer()
UpperCamelCase__ : Tuple = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = '''lower newer'''
UpperCamelCase__ : Tuple = processor(text=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = tokenizer(__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : Optional[int] = self.get_tokenizer()
UpperCamelCase__ : Dict = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = '''lower newer'''
UpperCamelCase__ : Any = self.prepare_image_inputs()
UpperCamelCase__ : Any = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(__SCREAMING_SNAKE_CASE ):
processor()
def __SCREAMING_SNAKE_CASE ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : List[str] = self.get_image_processor()
UpperCamelCase__ : Optional[int] = self.get_tokenizer()
UpperCamelCase__ : Tuple = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ : Tuple = processor.batch_decode(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : int = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = '''lower newer'''
UpperCamelCase__ : List[str] = self.prepare_image_inputs()
UpperCamelCase__ : List[Any] = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 285
| 1
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : Optional[Any] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 354
|
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
_lowercase : Any = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
_lowercase : Any = DatasetInfosDict.from_directory(lowerCamelCase_ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_lowercase : Union[str, Any] = str(lowerCamelCase_ )
dataset_info.write_to_directory(lowerCamelCase_ )
_lowercase : List[str] = DatasetInfo.from_directory(lowerCamelCase_ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(lowerCamelCase_ , 'dataset_info.json' ) )
def UpperCamelCase_( ) -> int:
_lowercase : Tuple = DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
_lowercase : Optional[int] = dataset_info._to_yaml_dict()
assert sorted(lowerCamelCase_ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
_lowercase : str = yaml.safe_dump(lowerCamelCase_ )
_lowercase : str = yaml.safe_load(lowerCamelCase_ )
assert dataset_info_yaml_dict == reloaded
def UpperCamelCase_( ) -> int:
_lowercase : Tuple = DatasetInfo()
_lowercase : Tuple = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
_lowercase : Tuple = str(lowerCamelCase_ )
dataset_infos_dict.write_to_directory(lowerCamelCase_ )
_lowercase : Tuple = DatasetInfosDict.from_directory(lowerCamelCase_ )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
_lowercase : Any = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
_lowercase : str = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(lowerCamelCase_ , 'README.md' ) )
| 354
| 1
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
a__ = get_tests_dir('''fixtures''')
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Tuple:
# A mock response for an HTTP head request to emulate server down
_a : Optional[Any] = mock.Mock()
_a : List[Any] = 5_0_0
_a : Optional[Any] = {}
_a : List[Any] = HTTPError
_a : Optional[Any] = {}
# Download this model to make sure it's in the cache.
_a : Dict = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=_a ) as mock_head:
_a : Tuple = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# This check we did call the fake head request
mock_head.assert_called()
def __lowercase ( self ) -> Tuple:
# This test is for deprecated behavior and can be removed in v5
_a : int = WavaVecaFeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowercase ( cls ) -> Optional[Any]:
_a : str = TOKEN
HfFolder.save_token(_a )
@classmethod
def __lowercase ( cls ) -> Tuple:
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
def __lowercase ( self ) -> str:
_a : int = WavaVecaFeatureExtractor.from_pretrained(_a )
feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
_a : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(F"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(_a , _a ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_a , repo_id='''test-feature-extractor''' , push_to_hub=_a , use_auth_token=self._token )
_a : Tuple = WavaVecaFeatureExtractor.from_pretrained(F"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(_a , _a ) )
def __lowercase ( self ) -> Dict:
_a : str = WavaVecaFeatureExtractor.from_pretrained(_a )
feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
_a : Dict = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(_a , _a ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_a , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=_a , use_auth_token=self._token )
_a : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(_a , _a ) )
def __lowercase ( self ) -> Any:
CustomFeatureExtractor.register_for_auto_class()
_a : Dict = CustomFeatureExtractor.from_pretrained(_a )
feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
_a : Tuple = AutoFeatureExtractor.from_pretrained(
F"""{USER}/test-dynamic-feature-extractor""" , trust_remote_code=_a )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
| 14
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
lowercase_ = TypeVar("""T""")
class a_ ( Generic[T] ):
'''simple docstring'''
def __init__( self , A , A ) -> None:
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = len(A )
_SCREAMING_SNAKE_CASE = [any_type for _ in range(self.N )] + arr
_SCREAMING_SNAKE_CASE = fnc
self.build()
def snake_case_( self ) -> None:
for p in range(self.N - 1 , 0 , -1 ):
_SCREAMING_SNAKE_CASE = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def snake_case_( self , A , A ) -> None:
p += self.N
_SCREAMING_SNAKE_CASE = v
while p > 1:
_SCREAMING_SNAKE_CASE = p // 2
_SCREAMING_SNAKE_CASE = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def snake_case_( self , A , A ) -> T | None: # noqa: E741
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = l + self.N, r + self.N
_SCREAMING_SNAKE_CASE = None
while l <= r:
if l % 2 == 1:
_SCREAMING_SNAKE_CASE = self.st[l] if res is None else self.fn(A , self.st[l] )
if r % 2 == 0:
_SCREAMING_SNAKE_CASE = self.st[r] if res is None else self.fn(A , self.st[r] )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
lowercase_ = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
lowercase_ = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
lowercase_ = SegmentTree(test_array, min)
lowercase_ = SegmentTree(test_array, max)
lowercase_ = SegmentTree(test_array, lambda a, b: a + b)
def lowerCamelCase ( ) ->None:
for i in range(len(__lowerCamelCase ) ):
for j in range(__lowerCamelCase , len(__lowerCamelCase ) ):
_SCREAMING_SNAKE_CASE = reduce(__lowerCamelCase , test_array[i : j + 1] )
_SCREAMING_SNAKE_CASE = reduce(__lowerCamelCase , test_array[i : j + 1] )
_SCREAMING_SNAKE_CASE = reduce(lambda __lowerCamelCase , __lowerCamelCase : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(__lowerCamelCase , __lowerCamelCase )
assert max_range == max_segment_tree.query(__lowerCamelCase , __lowerCamelCase )
assert sum_range == sum_segment_tree.query(__lowerCamelCase , __lowerCamelCase )
test_all_segments()
for index, value in test_updates.items():
lowercase_ = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 314
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 65
|
"""simple docstring"""
from __future__ import annotations
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> list[str]:
if partitions <= 0:
raise ValueError('''partitions must be a positive number!''' )
if partitions > number_of_bytes:
raise ValueError('''partitions can not > number_of_bytes!''' )
__a = number_of_bytes // partitions
__a = []
for i in range(lowerCAmelCase__ ):
__a = i * bytes_per_partition + 1
__a = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65
| 1
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( __snake_case ):
_SCREAMING_SNAKE_CASE = ['image_processor', 'tokenizer']
_SCREAMING_SNAKE_CASE = 'BlipImageProcessor'
_SCREAMING_SNAKE_CASE = 'AutoTokenizer'
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str ):
__a = False
super().__init__(A_ , A_ )
__a = self.image_processor
def __call__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int = None , __SCREAMING_SNAKE_CASE : Tuple = None , __SCREAMING_SNAKE_CASE : List[str] = True , __SCREAMING_SNAKE_CASE : Dict = False , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Union[str, Any] = None , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : Tuple = None , __SCREAMING_SNAKE_CASE : Union[str, Any] = None , __SCREAMING_SNAKE_CASE : List[Any] = False , __SCREAMING_SNAKE_CASE : Tuple = False , __SCREAMING_SNAKE_CASE : List[str] = False , __SCREAMING_SNAKE_CASE : List[Any] = False , __SCREAMING_SNAKE_CASE : int = False , __SCREAMING_SNAKE_CASE : List[Any] = True , __SCREAMING_SNAKE_CASE : Optional[int] = None , **__SCREAMING_SNAKE_CASE : Any , ):
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
__a = self.tokenizer
__a = self.tokenizer(
text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_token_type_ids=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , )
return text_encoding
# add pixel_values
__a = self.image_processor(A_ , return_tensors=A_ )
if text is not None:
__a = self.tokenizer(
text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_token_type_ids=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , )
else:
__a = None
if text_encoding is not None:
encoding_image_processor.update(A_ )
return encoding_image_processor
def _UpperCAmelCase ( self : Any , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : List[str] ):
return self.tokenizer.batch_decode(*A_ , **A_ )
def _UpperCAmelCase ( self : List[Any] , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Tuple ):
return self.tokenizer.decode(*A_ , **A_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _UpperCAmelCase ( self : List[Any] ):
__a = self.tokenizer.model_input_names
__a = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 197
|
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
UpperCamelCase : List[Any] = [1]
for i in range(2 , _lowerCAmelCase ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
UpperCamelCase : Tuple = []
UpperCamelCase : str = list(range(_lowerCAmelCase ) )
# Find permutation
while factorials:
UpperCamelCase : int = factorials.pop()
UpperCamelCase , UpperCamelCase : int = divmod(_lowerCAmelCase , _lowerCAmelCase )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 629
| 0
|
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json',
# See all BART models at https://huggingface.co/models?filter=bart
}
class lowerCAmelCase__ ( A_ ):
__a = """bart"""
__a = ["""past_key_values"""]
__a = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : Any , _lowerCamelCase : List[str]=50265 , _lowerCamelCase : Any=1024 , _lowerCamelCase : Dict=12 , _lowerCamelCase : Optional[int]=4096 , _lowerCamelCase : Optional[int]=16 , _lowerCamelCase : List[Any]=12 , _lowerCamelCase : List[str]=4096 , _lowerCamelCase : Dict=16 , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Dict="gelu" , _lowerCamelCase : Optional[Any]=1024 , _lowerCamelCase : Tuple=0.1 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : int=0.0 , _lowerCamelCase : int=0.0_2 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : List[str]=False , _lowerCamelCase : str=True , _lowerCamelCase : int=3 , _lowerCamelCase : str=1 , _lowerCamelCase : int=0 , _lowerCamelCase : List[Any]=2 , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Union[str, Any]=2 , _lowerCamelCase : str=2 , **_lowerCamelCase : List[Any] , ):
_snake_case = vocab_size
_snake_case = max_position_embeddings
_snake_case = d_model
_snake_case = encoder_ffn_dim
_snake_case = encoder_layers
_snake_case = encoder_attention_heads
_snake_case = decoder_ffn_dim
_snake_case = decoder_layers
_snake_case = decoder_attention_heads
_snake_case = dropout
_snake_case = attention_dropout
_snake_case = activation_dropout
_snake_case = activation_function
_snake_case = init_std
_snake_case = encoder_layerdrop
_snake_case = decoder_layerdrop
_snake_case = classifier_dropout
_snake_case = use_cache
_snake_case = encoder_layers
_snake_case = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=_lowerCamelCase , pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , is_encoder_decoder=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , forced_eos_token_id=_lowerCamelCase , **_lowerCamelCase , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , _lowerCamelCase ):
_snake_case = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'''The config can simply be saved and uploaded again to be fixed.''' )
class lowerCAmelCase__ ( A_ ):
@property
def lowercase ( self : List[str] ):
if self.task in ["default", "seq2seq-lm"]:
_snake_case = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
_snake_case = {0: '''batch'''}
_snake_case = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
_snake_case = {0: '''batch''', 1: '''decoder_sequence'''}
_snake_case = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_lowerCamelCase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_snake_case = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
_snake_case , _snake_case = self.num_layers
for i in range(_lowerCamelCase ):
_snake_case = {0: '''batch''', 2: '''past_sequence + sequence'''}
_snake_case = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
_snake_case = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def lowercase ( self : Optional[Any] ):
if self.task in ["default", "seq2seq-lm"]:
_snake_case = super().outputs
else:
_snake_case = super(_lowerCamelCase , self ).outputs
if self.use_past:
_snake_case , _snake_case = self.num_layers
for i in range(_lowerCamelCase ):
_snake_case = {0: '''batch''', 2: '''past_sequence + sequence'''}
_snake_case = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def lowercase ( self : Dict , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : int = -1 , _lowerCamelCase : int = -1 , _lowerCamelCase : bool = False , _lowerCamelCase : Optional[TensorType] = None , ):
_snake_case = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Generate decoder inputs
_snake_case = seq_length if not self.use_past else 1
_snake_case = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_snake_case = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
_snake_case = dict(**_lowerCamelCase , **_lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_snake_case , _snake_case = common_inputs['''input_ids'''].shape
_snake_case = common_inputs['''decoder_input_ids'''].shape[1]
_snake_case , _snake_case = self.num_attention_heads
_snake_case = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_snake_case = decoder_seq_length + 3
_snake_case = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_snake_case = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(_lowerCamelCase , _lowerCamelCase )] , dim=1 )
_snake_case = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_snake_case , _snake_case = self.num_layers
_snake_case = min(_lowerCamelCase , _lowerCamelCase )
_snake_case = max(_lowerCamelCase , _lowerCamelCase ) - min_num_layers
_snake_case = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(_lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
torch.zeros(_lowerCamelCase ),
) )
# TODO: test this.
_snake_case = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(_lowerCamelCase , _lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) )
return common_inputs
def lowercase ( self : List[Any] , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : int = -1 , _lowerCamelCase : int = -1 , _lowerCamelCase : bool = False , _lowerCamelCase : Optional[TensorType] = None , ):
_snake_case = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_snake_case , _snake_case = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
_snake_case = seqlen + 2
_snake_case , _snake_case = self.num_layers
_snake_case , _snake_case = self.num_attention_heads
_snake_case = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_snake_case = common_inputs['''attention_mask'''].dtype
_snake_case = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(_lowerCamelCase , _lowerCamelCase , dtype=_lowerCamelCase )] , dim=1 )
_snake_case = [
(torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) for _ in range(_lowerCamelCase )
]
return common_inputs
def lowercase ( self : Dict , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : int = -1 , _lowerCamelCase : int = -1 , _lowerCamelCase : bool = False , _lowerCamelCase : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_snake_case = compute_effective_axis_dimension(
_lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_snake_case = tokenizer.num_special_tokens_to_add(_lowerCamelCase )
_snake_case = compute_effective_axis_dimension(
_lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
_snake_case = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
_snake_case = dict(tokenizer(_lowerCamelCase , return_tensors=_lowerCamelCase ) )
return common_inputs
def lowercase ( self : Optional[int] , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : int = -1 , _lowerCamelCase : int = -1 , _lowerCamelCase : bool = False , _lowerCamelCase : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
_snake_case = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
elif self.task == "causal-lm":
_snake_case = self._generate_dummy_inputs_for_causal_lm(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
else:
_snake_case = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
return common_inputs
def lowercase ( self : Tuple , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : int ):
if self.task in ["default", "seq2seq-lm"]:
_snake_case = super()._flatten_past_key_values_(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
_snake_case = super(_lowerCamelCase , self )._flatten_past_key_values_(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
| 704
|
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : str = "cpu" , __lowerCamelCase : Union[str, None] = None ) -> None:
_snake_case = torch.load(__lowerCamelCase , map_location=__lowerCamelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(__lowerCamelCase , torch.Tensor ):
raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
_snake_case = v.half()
if save_path is None: # overwrite src_path
_snake_case = src_path
torch.save(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
fire.Fire(convert)
| 430
| 0
|
from cva import destroyAllWindows, imread, imshow, waitKey
def _A ( SCREAMING_SNAKE_CASE ):
# getting number of pixels in the image
UpperCAmelCase__ , UpperCAmelCase__: str = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: Tuple = [2_5_5, 2_5_5, 2_5_5] - img[i][j]
return img
if __name__ == "__main__":
# read original image
_lowerCAmelCase : Any =imread("""image_data/lena.jpg""", 1)
# convert to its negative
_lowerCAmelCase : Any =convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 113
|
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_lowerCAmelCase : str =logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCAmelCase : Any ="""
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE=8 ):
UpperCAmelCase__: int = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase__: Optional[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class __UpperCamelCase ( _a ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
super().__init__()
self.register_modules(
unet=lowerCamelCase__ , scheduler=lowerCamelCase__ , movq=lowerCamelCase__ , )
UpperCAmelCase__: Optional[int] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if latents is None:
UpperCAmelCase__: Any = randn_tensor(lowerCamelCase__ , generator=lowerCamelCase__ , device=lowerCamelCase__ , dtype=lowerCamelCase__ )
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
UpperCAmelCase__: Tuple = latents.to(lowerCamelCase__ )
UpperCAmelCase__: Union[str, Any] = latents * scheduler.init_noise_sigma
return latents
def _UpperCAmelCase ( self , lowerCamelCase__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCAmelCase__: Any = torch.device(F"cuda:{gpu_id}" )
UpperCAmelCase__: int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCAmelCase ( self , lowerCamelCase__=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
UpperCAmelCase__: Union[str, Any] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=lowerCamelCase__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase__: int = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase__ , UpperCAmelCase__: Tuple = cpu_offload_with_hook(lowerCamelCase__ , lowerCamelCase__ , prev_module_hook=lowerCamelCase__ )
# We'll offload the last model manually.
UpperCAmelCase__: List[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _UpperCAmelCase ( self ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase__ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCamelCase__ )
def __call__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 5_1_2 , lowerCamelCase__ = 5_1_2 , lowerCamelCase__ = 1_0_0 , lowerCamelCase__ = 4.0 , lowerCamelCase__ = 1 , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "pil" , lowerCamelCase__ = True , ):
UpperCAmelCase__: str = self._execution_device
UpperCAmelCase__: List[Any] = guidance_scale > 1.0
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
UpperCAmelCase__: List[Any] = torch.cat(lowerCamelCase__ , dim=0 )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
UpperCAmelCase__: List[Any] = torch.cat(lowerCamelCase__ , dim=0 )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
UpperCAmelCase__: Any = torch.cat(lowerCamelCase__ , dim=0 )
UpperCAmelCase__: Dict = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
UpperCAmelCase__: Tuple = image_embeds.repeat_interleave(lowerCamelCase__ , dim=0 )
UpperCAmelCase__: Union[str, Any] = negative_image_embeds.repeat_interleave(lowerCamelCase__ , dim=0 )
UpperCAmelCase__: Any = hint.repeat_interleave(lowerCamelCase__ , dim=0 )
UpperCAmelCase__: Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase__ )
UpperCAmelCase__: Dict = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase__ )
self.scheduler.set_timesteps(lowerCamelCase__ , device=lowerCamelCase__ )
UpperCAmelCase__: Optional[Any] = self.scheduler.timesteps
UpperCAmelCase__: Optional[int] = self.movq.config.latent_channels
UpperCAmelCase__ , UpperCAmelCase__: Tuple = downscale_height_and_width(lowerCamelCase__ , lowerCamelCase__ , self.movq_scale_factor )
# create initial latent
UpperCAmelCase__: Any = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowerCamelCase__ ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase__: str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase__: List[str] = {"image_embeds": image_embeds, "hint": hint}
UpperCAmelCase__: Any = self.unet(
sample=lowerCamelCase__ , timestep=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , added_cond_kwargs=lowerCamelCase__ , return_dict=lowerCamelCase__ , )[0]
if do_classifier_free_guidance:
UpperCAmelCase__ , UpperCAmelCase__: str = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase__ , UpperCAmelCase__: Any = noise_pred.chunk(2 )
UpperCAmelCase__ , UpperCAmelCase__: Any = variance_pred.chunk(2 )
UpperCAmelCase__: List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase__: Dict = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase__ , UpperCAmelCase__: int = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase__: List[Any] = self.scheduler.step(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , generator=lowerCamelCase__ , )[0]
# post-processing
UpperCAmelCase__: Union[str, Any] = self.movq.decode(lowerCamelCase__ , force_not_quantize=lowerCamelCase__ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCAmelCase__: List[str] = image * 0.5 + 0.5
UpperCAmelCase__: int = image.clamp(0 , 1 )
UpperCAmelCase__: Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase__: List[str] = self.numpy_to_pil(lowerCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase__ )
| 113
| 1
|
'''simple docstring'''
from manim import *
class snake_case ( lowercase__ ):
"""simple docstring"""
def a__ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = Rectangle(height=0.5, width=0.5 )
SCREAMING_SNAKE_CASE_ = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE_ = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE_ = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE_ = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase, buff=0 )
SCREAMING_SNAKE_CASE_ = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase, buff=0 )
SCREAMING_SNAKE_CASE_ = VGroup(__lowerCamelCase, __lowerCamelCase ).arrange(__lowerCamelCase, buff=0 )
SCREAMING_SNAKE_CASE_ = Text('CPU', font_size=24 )
SCREAMING_SNAKE_CASE_ = Group(__lowerCamelCase, __lowerCamelCase ).arrange(__lowerCamelCase, buff=0.5, aligned_edge=__lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = [mem.copy() for i in range(1 )]
SCREAMING_SNAKE_CASE_ = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase, buff=0 )
SCREAMING_SNAKE_CASE_ = Text('GPU', font_size=24 )
SCREAMING_SNAKE_CASE_ = Group(__lowerCamelCase, __lowerCamelCase ).arrange(__lowerCamelCase, buff=0.5, aligned_edge=__lowerCamelCase )
gpu.align_to(__lowerCamelCase, __lowerCamelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE_ = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase, buff=0 )
SCREAMING_SNAKE_CASE_ = Text('Model', font_size=24 )
SCREAMING_SNAKE_CASE_ = Group(__lowerCamelCase, __lowerCamelCase ).arrange(__lowerCamelCase, buff=0.5, aligned_edge=__lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(__lowerCamelCase, run_time=1 ), Create(__lowerCamelCase, run_time=1 ), Create(__lowerCamelCase, run_time=1 ), )
SCREAMING_SNAKE_CASE_ = MarkupText(
f"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""", font_size=24, )
SCREAMING_SNAKE_CASE_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE_ = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""", font_size=18, )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCamelCase, run_time=2.5 ), Write(__lowerCamelCase ), Write(__lowerCamelCase ) )
self.add(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
for i, rect in enumerate(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0.0 ).set_fill(__lowerCamelCase, opacity=0.7 )
cpu_target.move_to(__lowerCamelCase )
cpu_target.generate_target()
SCREAMING_SNAKE_CASE_ = 0.46 / 4
SCREAMING_SNAKE_CASE_ = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=__lowerCamelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target, direction=__lowerCamelCase, buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target, direction=__lowerCamelCase, buff=0.0 )
cpu_targs.append(__lowerCamelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(__lowerCamelCase ) )
second_animations.append(MoveToTarget(__lowerCamelCase, run_time=1.5 ) )
self.play(*__lowerCamelCase )
self.play(*__lowerCamelCase )
self.wait()
| 705
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class snake_case ( lowercase_ ):
"""simple docstring"""
_a = """levit"""
def __init__( self, _lowercase=224, _lowercase=3, _lowercase=3, _lowercase=2, _lowercase=1, _lowercase=16, _lowercase=[128, 256, 384], _lowercase=[4, 8, 12], _lowercase=[4, 4, 4], _lowercase=[16, 16, 16], _lowercase=0, _lowercase=[2, 2, 2], _lowercase=[2, 2, 2], _lowercase=0.02, **_lowercase, ) -> int:
super().__init__(**_lowercase )
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = kernel_size
SCREAMING_SNAKE_CASE_ = stride
SCREAMING_SNAKE_CASE_ = padding
SCREAMING_SNAKE_CASE_ = hidden_sizes
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = key_dim
SCREAMING_SNAKE_CASE_ = drop_path_rate
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = attention_ratio
SCREAMING_SNAKE_CASE_ = mlp_ratio
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class snake_case ( lowercase_ ):
"""simple docstring"""
_a = version.parse("""1.11""" )
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def a__ ( self ) -> float:
return 1E-4
| 238
| 0
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( lowercase__ = "The quick brown fox jumps over the lazy dog" , ) -> bool:
lowerCAmelCase__ : Optional[int] = set()
# Replace all the whitespace in our sentence
lowerCAmelCase__ : Optional[int] = input_str.replace(" " , "" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(lowercase__ ) == 2_6
def SCREAMING_SNAKE_CASE ( lowercase__ = "The quick brown fox jumps over the lazy dog" , ) -> bool:
lowerCAmelCase__ : Any = [False] * 2_6
for char in input_str:
if char.islower():
lowerCAmelCase__ : int = True
elif char.isupper():
lowerCAmelCase__ : str = True
return all(lowercase__ )
def SCREAMING_SNAKE_CASE ( lowercase__ = "The quick brown fox jumps over the lazy dog" , ) -> bool:
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
def SCREAMING_SNAKE_CASE ( ) -> None:
from timeit import timeit
lowerCAmelCase__ : Dict = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
print(timeit("is_pangram()" , setup=lowercase__ ) )
print(timeit("is_pangram_faster()" , setup=lowercase__ ) )
print(timeit("is_pangram_fastest()" , setup=lowercase__ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 453
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class __a ( __magic_name__ ):
"""simple docstring"""
__UpperCamelCase : str = 'dpr'
def __init__( self , snake_case=30_522 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3_072 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=2 , snake_case=0.02 , snake_case=1e-12 , snake_case=0 , snake_case="absolute" , snake_case = 0 , **snake_case , ):
"""simple docstring"""
super().__init__(pad_token_id=snake_case , **snake_case )
lowerCAmelCase__ : Dict = vocab_size
lowerCAmelCase__ : int = hidden_size
lowerCAmelCase__ : List[str] = num_hidden_layers
lowerCAmelCase__ : List[Any] = num_attention_heads
lowerCAmelCase__ : Any = hidden_act
lowerCAmelCase__ : Any = intermediate_size
lowerCAmelCase__ : str = hidden_dropout_prob
lowerCAmelCase__ : List[str] = attention_probs_dropout_prob
lowerCAmelCase__ : List[Any] = max_position_embeddings
lowerCAmelCase__ : Optional[Any] = type_vocab_size
lowerCAmelCase__ : Tuple = initializer_range
lowerCAmelCase__ : Dict = layer_norm_eps
lowerCAmelCase__ : Dict = projection_dim
lowerCAmelCase__ : Any = position_embedding_type
| 453
| 1
|
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def lowercase ( _snake_case : Optional[Any] , _snake_case : Dict , _snake_case : str , _snake_case : Tuple=5 ) ->Dict:
"""simple docstring"""
assert masked_input.count('''<mask>''' ) == 1
__snake_case : Dict = torch.tensor(tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) ).unsqueeze(0 ) # Batch size 1
__snake_case : Optional[Any] = model(_snake_case )[0] # The last hidden-state is the first element of the output tuple
__snake_case : Optional[Any] = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
__snake_case : List[Any] = logits[0, masked_index, :]
__snake_case : List[str] = logits.softmax(dim=0 )
__snake_case : List[str] = prob.topk(k=_snake_case , dim=0 )
__snake_case : str = ''' '''.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(_snake_case ) )] )
__snake_case : Dict = tokenizer.mask_token
__snake_case : str = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''' ) ):
__snake_case : Optional[Any] = predicted_token_bpe.replace('''\u2581''' , ''' ''' )
if " {0}".format(_snake_case ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(''' {0}'''.format(_snake_case ) , _snake_case ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(_snake_case , _snake_case ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
SCREAMING_SNAKE_CASE : Optional[Any] = CamembertTokenizer.from_pretrained("""camembert-base""")
SCREAMING_SNAKE_CASE : Tuple = CamembertForMaskedLM.from_pretrained("""camembert-base""")
model.eval()
SCREAMING_SNAKE_CASE : List[str] = """Le camembert est <mask> :)"""
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 718
|
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
__snake_case : List[str] = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=a_ , cache_dir=a_ )
__snake_case : Optional[Any] = [t[-1] for t in os.walk(os.path.join(a_ , os.listdir(a_ )[0] , '''snapshots''' ) )]
__snake_case : Any = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=a_ )
__snake_case : Union[str, Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__snake_case : Union[str, Any] = jax.random.PRNGKey(0 )
__snake_case : Tuple = 4
__snake_case : Any = jax.device_count()
__snake_case : Any = num_samples * [prompt]
__snake_case : Optional[int] = pipeline.prepare_inputs(a_ )
# shard inputs and rng
__snake_case : Optional[int] = replicate(a_ )
__snake_case : List[str] = jax.random.split(a_ , a_ )
__snake_case : List[str] = shard(a_ )
__snake_case : int = pipeline(a_ , a_ , a_ , a_ , jit=a_ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.151_4745 ) < 1E-3
assert np.abs(np.abs(a_ , dtype=np.floataa ).sum() - 4_9947.875 ) < 5E-1
__snake_case : int = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(a_ ) == num_samples
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=a_ )
__snake_case : Union[str, Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__snake_case : Dict = jax.random.PRNGKey(0 )
__snake_case : Union[str, Any] = 50
__snake_case : Optional[Any] = jax.device_count()
__snake_case : List[Any] = num_samples * [prompt]
__snake_case : Tuple = pipeline.prepare_inputs(a_ )
# shard inputs and rng
__snake_case : Tuple = replicate(a_ )
__snake_case : Tuple = jax.random.split(a_ , a_ )
__snake_case : Any = shard(a_ )
__snake_case : Optional[int] = pipeline(a_ , a_ , a_ , a_ , jit=a_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0565_2401) ) < 1E-3
assert np.abs((np.abs(a_ , dtype=np.floataa ).sum() - 238_3808.2) ) < 5E-1
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=a_ )
__snake_case : Any = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__snake_case : Optional[int] = jax.random.PRNGKey(0 )
__snake_case : Optional[Any] = 50
__snake_case : Any = jax.device_count()
__snake_case : Optional[Any] = num_samples * [prompt]
__snake_case : Any = pipeline.prepare_inputs(a_ )
# shard inputs and rng
__snake_case : Optional[int] = replicate(a_ )
__snake_case : int = jax.random.split(a_ , a_ )
__snake_case : int = shard(a_ )
__snake_case : List[Any] = pipeline(a_ , a_ , a_ , a_ , jit=a_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(a_ , dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case , __snake_case : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa )
__snake_case : Dict = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__snake_case : List[Any] = jax.random.PRNGKey(0 )
__snake_case : Tuple = 50
__snake_case : Dict = jax.device_count()
__snake_case : Any = num_samples * [prompt]
__snake_case : List[Any] = pipeline.prepare_inputs(a_ )
# shard inputs and rng
__snake_case : Dict = replicate(a_ )
__snake_case : str = jax.random.split(a_ , a_ )
__snake_case : Dict = shard(a_ )
__snake_case : Any = pipeline(a_ , a_ , a_ , a_ , jit=a_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0400_3906) ) < 1E-3
assert np.abs((np.abs(a_ , dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = FlaxDDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , set_alpha_to_one=a_ , steps_offset=1 , )
__snake_case , __snake_case : Any = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=a_ , safety_checker=a_ , )
__snake_case : int = scheduler.create_state()
__snake_case : Optional[int] = scheduler_state
__snake_case : str = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__snake_case : Tuple = jax.random.PRNGKey(0 )
__snake_case : List[str] = 50
__snake_case : Optional[int] = jax.device_count()
__snake_case : Optional[Any] = num_samples * [prompt]
__snake_case : Union[str, Any] = pipeline.prepare_inputs(a_ )
# shard inputs and rng
__snake_case : Optional[Any] = replicate(a_ )
__snake_case : str = jax.random.split(a_ , a_ )
__snake_case : int = shard(a_ )
__snake_case : Optional[Any] = pipeline(a_ , a_ , a_ , a_ , jit=a_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4504_3945) ) < 1E-3
assert np.abs((np.abs(a_ , dtype=np.floataa ).sum() - 234_7693.5) ) < 5E-1
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__snake_case : Dict = jax.device_count()
__snake_case : Any = num_samples * [prompt]
__snake_case : List[str] = jax.random.split(jax.random.PRNGKey(0 ) , a_ )
__snake_case , __snake_case : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=a_ , )
__snake_case : Tuple = replicate(a_ )
__snake_case : Dict = pipeline.prepare_inputs(a_ )
__snake_case : Union[str, Any] = shard(a_ )
__snake_case : str = pipeline(a_ , a_ , a_ , jit=a_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
__snake_case : Tuple = images[2, 0, 2_56, 10:17, 1]
# With memory efficient attention
__snake_case , __snake_case : Dict = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=a_ , use_memory_efficient_attention=a_ , )
__snake_case : Tuple = replicate(a_ )
__snake_case : Optional[Any] = pipeline.prepare_inputs(a_ )
__snake_case : Union[str, Any] = shard(a_ )
__snake_case : Tuple = pipeline(a_ , a_ , a_ , jit=a_ ).images
assert images_eff.shape == (num_samples, 1, 5_12, 5_12, 3)
__snake_case : Optional[int] = images[2, 0, 2_56, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 229
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.