"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
_a = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load PyTorch checkpoints into a Flax state dict."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict


def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""

    def is_key_or_prefix_key_in_dict(key) -> bool:
        """Checks whether `key` or `(model_prefix,) + key` is in random_flax_state_dict."""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
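

# Illustrative sketch (added; these key tuples are made-up examples, not taken
# from this file) of how the rules above fire:
#
#   ("encoder", "conv", "weight") with a 4D tensor and no matching Flax key
#       -> ("encoder", "conv", "kernel"), tensor transposed with (2, 3, 1, 0)
#   ("encoder", "layer_norm", "gamma")
#       -> ("encoder", "layer_norm", "weight")   # old-style LayerNorm naming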


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys, values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters' names to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)


def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys, values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )

        # Need to change some parameters' names to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)


def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load Flax checkpoints into a PyTorch model."""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax weights into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
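

# Minimal usage sketch (added). The model class and checkpoint path below are
# illustrative assumptions, not taken from this file:
#
#     from transformers import FlaxBertModel
#     model = FlaxBertModel.from_pretrained("bert-base-uncased")
#     flax_state = load_pytorch_checkpoint_in_flax_state_dict(
#         model, "pytorch_model.bin", is_sharded=False
#     )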
"""Recursive insertion sort."""
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int) -> None:
    """Sort the first `n` elements of `collection` in place."""
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Repair the order at `index`, pushing the larger element rightward."""
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list: list[int] = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
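

# Worked example (added for illustration): rec_insertion_sort handles the last
# element first, then recurses on the prefix; insert_next swaps out-of-order
# neighbors one step at a time.
#
#     data = [3, 1, 2]
#     rec_insertion_sort(data, len(data))
#     assert data == [1, 2, 3]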
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
lowercase = ["transformers", "torch", "note_seq"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
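

# Added note: this is the standard dummy-object pattern. When the listed
# backends are not installed, the import machinery exposes this placeholder
# instead of the real class, so any use fails with a clear message, e.g.
# (illustrative):
#
#     pipe = SpectrogramDiffusionPipeline()  # raises if note_seq is missing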
"""simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def A ( snake_case :Dict , snake_case :Optional[Any] , snake_case :List[Any] ) -> Optional[Any]:
__UpperCamelCase = OmegaConf.load(snake_case )
__UpperCamelCase = torch.load(snake_case , map_location='cpu' )['model']
__UpperCamelCase = list(state_dict.keys() )
# extract state_dict for VQVAE
__UpperCamelCase = {}
__UpperCamelCase = 'first_stage_model.'
for key in keys:
if key.startswith(snake_case ):
__UpperCamelCase = state_dict[key]
# extract state_dict for UNetLDM
__UpperCamelCase = {}
__UpperCamelCase = 'model.diffusion_model.'
for key in keys:
if key.startswith(snake_case ):
__UpperCamelCase = state_dict[key]
__UpperCamelCase = config.model.params.first_stage_config.params
__UpperCamelCase = config.model.params.unet_config.params
__UpperCamelCase = VQModel(**snake_case ).eval()
vqvae.load_state_dict(snake_case )
__UpperCamelCase = UNetLDMModel(**snake_case ).eval()
unet.load_state_dict(snake_case )
__UpperCamelCase = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=snake_case , )
__UpperCamelCase = LDMPipeline(snake_case , snake_case , snake_case )
pipeline.save_pretrained(snake_case )
if __name__ == "__main__":
UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", type=str, required=True)
parser.add_argument("--config_path", type=str, required=True)
parser.add_argument("--output_path", type=str, required=True)
UpperCamelCase : Optional[int] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
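

# Example invocation (added; the script file name and paths are placeholders,
# not taken from this file):
#
#     python convert_ldm_original_checkpoint.py \
#         --checkpoint_path model.ckpt \
#         --config_path config.yaml \
#         --output_path ./ldm_pipeline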
def sum_of_digits(n: int) -> int:
    """Sum the digits of |n| iteratively."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Sum the digits of |n|, peeling one digit per call."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Sum the digits of |n| via its string representation."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations on ints of increasing length."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
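

# Quick check (added): the three implementations agree, e.g.
#
#     sum_of_digits(12345) == sum_of_digits_recursion(12345) \
#         == sum_of_digits_compact(12345) == 15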
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
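

# Hedged usage sketch (added; the checkpoint name and file path below are
# illustrative, not taken from this file):
#
#     from transformers import pipeline
#     classifier = pipeline(
#         "zero-shot-image-classification", model="openai/clip-vit-base-patch32"
#     )
#     classifier("cat.png", candidate_labels=["cat", "dog"])
#     # -> [{"score": ..., "label": "cat"}, {"score": ..., "label": "dog"}]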
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()


class StableDiffusionLDMaDPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=6,
            out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldmad_pipe(**inputs)
        rgb_1, depth_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldmad_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldmad_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldmad_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldmad_pipe(**inputs)
        rgb_2, depth_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        scheduler = PNDMScheduler(skip_prk_steps=True)
        components["scheduler"] = scheduler
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldmad_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2


@slow
@require_torch_gpu
class StableDiffusionLDMaDPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3


@nightly
@require_torch_gpu
class StableDiffusionLDMaDPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_4c(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__A = """pt"""
elif is_tf_available():
__A = """tf"""
else:
__A = """jax"""
class _lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Dict = ByTaTokenizer
__magic_name__ :str = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                continue
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist, so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests use invalid tokens for ByT5, which can only accept
        # one-character strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    # We need a different implementation of the test of the same name defined in TokenizerTesterMixin
    # because this tokenizer doesn't have a vocab
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
"""Project Euler problem 71: the reduced proper fraction immediately to the left of 3/7."""


def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """
    Return the numerator of the largest fraction strictly smaller than
    numerator/denominator among fractions with denominators up to `limit`.
    """
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
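

# Why this works (added note): for each denominator d <= limit, the largest
# numerator with n/d <= 3/7 is floor(d * 3 / 7), decremented when 7 divides d
# so the fraction stays strictly smaller than 3/7. The cross-multiplied test
#     current_numerator * max_denominator > current_denominator * max_numerator
# compares n/d against the running best without floating-point error.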
"""Tests for the `text-classification` agent tool."""
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
lowercase__ :Any = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowercase__ :Optional[Any] = [{"type": "code", "content": INSTALL_CONTENT}]
lowercase__ :List[str] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of a pre-softmax logit tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
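

# Added note: this is the entropy of softmax(x) in a numerically convenient
# form. With p_i = exp(x_i) / A and A = sum_j exp(x_j):
#     H = -sum_i p_i * log(p_i)
#       = log(A) - (sum_i x_i * exp(x_i)) / A
#       = log(A) - B / A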


class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits


@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[1:]  # add hidden_states and attentions if they are here

        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A shortcut from the output of one non-final BertLayer to the classification head."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)
            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
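# Hedged usage sketch (not from the original file; the checkpoint name and input prep
# are placeholder assumptions): at eval time every BertHighway computes logits after
# its layer, and once the prediction entropy drops below the per-layer threshold the
# encoder raises HighwayException, which the classification head catches to exit early.
model = DeeBertForSequenceClassification.from_pretrained("bert-base-uncased")
model.bert.encoder.set_early_exit_entropy(0.5)  # same threshold for every layer
model.eval()
with torch.no_grad():
    outputs = model(input_ids=input_ids)  # assumes input_ids prepared by a BERT tokenizer
logits = outputs[0]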
| 97 | 0 |
encode_dict = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded
def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
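# Hypothetical round trip through the cipher above (output computed by hand from
# encode_dict): "hello" -> AABBB AABAA ABABA ABABA ABBAB, concatenated.
print(encode("hello"))  # AABBBAABAAABABAABABAABBAB
print(decode("AABBBAABAAABABAABABAABBAB"))  # hello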
| 11 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
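# Tiny demonstration of padding_tensor as fixed above (hypothetical values):
# right-padding two ragged sequences with -1 up to length 4.
print(padding_tensor([[1, 2], [3]], -1, "right", 4))
# [[1, 2, -1, -1], [3, -1, -1, -1]]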
| 211 | 0 |
"""simple docstring"""
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")
METRIC_CONVERSION = {
"cubicmeter": from_to(1, 1),
"litre": from_to(0.001, 1_0_0_0),
"kilolitre": from_to(1, 1),
"gallon": from_to(0.00454, 264.172),
"cubicyard": from_to(0.76455, 1.30795),
"cubicfoot": from_to(0.028, 35.3147),
"cup": from_to(0.000236588, 4226.75),
}
def convert_volume(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
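# Worked example (computed from the conversion table above): 4 cubic metres in litres
# is 4 * 1 * 1000 = 4000.0.
print(convert_volume(4, "cubicmeter", "litre"))  # 4000.0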
| 359 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowercase_ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class __lowerCAmelCase ( unittest.TestCase , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = load_tool('''text-question-answering''' )
self.tool.setup()
__a = load_tool('''text-question-answering''' , remote=_a )
def __UpperCAmelCase ( self ):
__a = self.tool(_a , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.remote_tool(_a , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.tool(text=_a , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.remote_tool(text=_a , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
| 11 | 0 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 103 |
# flake8: noqa
# Lint as: python3
__all__ = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental | 6 | 0 |
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
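# Worked example (values verified by hand with three Jacobi sweeps of
# x = (2 - y - z)/4, y = (-6 - x - 2z)/5, z = (-4 - x - 2y)/4):
coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]])
constant = np.array([[2], [-6], [-4]])
print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], 3))
# [0.909375, -1.14375, -0.7484375]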
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod() | 361 |
def max_product_subarray(numbers: list[int]) -> int:
    """
    Returns the maximum product obtainable from a contiguous subarray of the
    given integers.

    >>> max_product_subarray([2, 3, -2, 4])
    6
    >>> max_product_subarray([-2, 0, -1])
    0
    """
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
return max_prod | 232 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : int = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
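# Small sketch (not from this file): attribute_map aliases the BERT-style attribute
# names onto DistilBERT's own names, so both spellings resolve to the same value.
config = DistilBertConfig()
print(config.dim, config.hidden_size)  # 768 768
print(config.n_layers, config.num_hidden_layers)  # 6 6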
| 156 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 156 | 1 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
def a ( self : List[str] ) -> int:
lowerCAmelCase__ = """sshleifer/tiny-gpt2"""
lowerCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_UpperCamelCase , multi_process=_UpperCamelCase , )
lowerCAmelCase__ = TensorFlowBenchmark(_UpperCamelCase )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Union[str, Any] ) -> List[str]:
lowerCAmelCase__ = """sgugger/tiny-distilbert-classification"""
lowerCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCamelCase , only_pretrain_model=_UpperCamelCase , )
lowerCAmelCase__ = TensorFlowBenchmark(_UpperCamelCase )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : List[Any] ) -> Any:
lowerCAmelCase__ = """sshleifer/tiny-gpt2"""
lowerCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCamelCase , )
lowerCAmelCase__ = TensorFlowBenchmark(_UpperCamelCase )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Optional[Any] ) -> str:
lowerCAmelCase__ = """sshleifer/tiny-gpt2"""
lowerCAmelCase__ = AutoConfig.from_pretrained(_UpperCamelCase )
lowerCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_UpperCamelCase , multi_process=_UpperCamelCase , )
lowerCAmelCase__ = TensorFlowBenchmark(_UpperCamelCase , [config] )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Tuple ) -> str:
lowerCAmelCase__ = """sshleifer/tiny-gpt2"""
lowerCAmelCase__ = AutoConfig.from_pretrained(_UpperCamelCase )
lowerCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCamelCase , )
lowerCAmelCase__ = TensorFlowBenchmark(_UpperCamelCase , [config] )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : List[str] ) -> List[Any]:
lowerCAmelCase__ = """sshleifer/tiny-gpt2"""
lowerCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCamelCase , )
lowerCAmelCase__ = TensorFlowBenchmark(_UpperCamelCase )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a ( self : Dict ) -> List[Any]:
lowerCAmelCase__ = """sshleifer/tiny-gpt2"""
lowerCAmelCase__ = AutoConfig.from_pretrained(_UpperCamelCase )
lowerCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCamelCase , )
lowerCAmelCase__ = TensorFlowBenchmark(_UpperCamelCase , [config] )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a ( self : Optional[int] ) -> str:
lowerCAmelCase__ = """patrickvonplaten/t5-tiny-random"""
lowerCAmelCase__ = AutoConfig.from_pretrained(_UpperCamelCase )
lowerCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCamelCase , )
lowerCAmelCase__ = TensorFlowBenchmark(_UpperCamelCase , configs=[config] )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0 , "Cannot do xla on CPU." )
def a ( self : int ) -> Optional[Any]:
lowerCAmelCase__ = """sshleifer/tiny-gpt2"""
lowerCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_UpperCamelCase , multi_process=_UpperCamelCase , )
lowerCAmelCase__ = TensorFlowBenchmark(_UpperCamelCase )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Tuple ) -> int:
lowerCAmelCase__ = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_UpperCamelCase , save_to_csv=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_UpperCamelCase , "inf_time.csv" ) , inference_memory_csv_file=os.path.join(_UpperCamelCase , "inf_mem.csv" ) , env_info_csv_file=os.path.join(_UpperCamelCase , "env.csv" ) , multi_process=_UpperCamelCase , )
lowerCAmelCase__ = TensorFlowBenchmark(_UpperCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_UpperCamelCase , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCamelCase , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCamelCase , "env.csv" ) ).exists() )
def a ( self : Any ) -> str:
lowerCAmelCase__ = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
self.assertTrue(hasattr(_UpperCamelCase , "sequential" ) )
self.assertTrue(hasattr(_UpperCamelCase , "cumulative" ) )
self.assertTrue(hasattr(_UpperCamelCase , "current" ) )
self.assertTrue(hasattr(_UpperCamelCase , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_UpperCamelCase , "log.txt" ) , log_print=_UpperCamelCase , trace_memory_line_by_line=_UpperCamelCase , eager_mode=_UpperCamelCase , multi_process=_UpperCamelCase , )
lowerCAmelCase__ = TensorFlowBenchmark(_UpperCamelCase )
lowerCAmelCase__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_UpperCamelCase , "log.txt" ) ).exists() )
| 358 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = torch.nn.Conv2d(
            in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1
        )
        self.mid_block = None
        self.down_blocks = nn.ModuleList([])
        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)
        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )
        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)
        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)
        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",
    ):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = nn.Conv2d(
            in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1
        )
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        temb_channels = in_channels if norm_type == "spatial" else None
        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)
        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)
        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class VectorQuantizer(nn.Module):
    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy
        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e
        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)
        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None
        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
        # preserve gradients
        z_q = z + (z_q - z).detach()
        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()
        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten
        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again
        # get quantized latent vectors
        z_q = self.embedding(indices)
        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()
        return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
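# Hedged usage sketch (shapes are illustrative assumptions): split an 8-channel
# parameter tensor into a 4-channel mean and 4-channel log-variance, then draw a
# reparameterized sample and score its KL divergence against a standard normal.
params = torch.randn(1, 8, 4, 4)
dist = DiagonalGaussianDistribution(params)
latent = dist.sample()  # shape (1, 4, 4, 4): mean + std * noise
print(dist.kl().shape)  # torch.Size([1])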
| 221 | 0 |
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 17 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
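# Small worked example (hypothetical graph): every edge here is forced into the MST,
# so boruvka() reports a total weight of 1 + 2 + 3 = 6.
g = Graph(4)
g.add_edge(0, 1, 1)
g.add_edge(0, 2, 2)
g.add_edge(2, 3, 3)
g.boruvka()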
| 17 | 1 |
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 304 |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
| 304 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
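# Illustrative usage sketch (not part of the original file):
#
#   config = TrajectoryTransformerConfig(n_layer=6)
#   print(config.hidden_size)  # 128, resolved via the "hidden_size" -> "n_embd" attribute_map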
| 52 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( _snake_case : int | str ) ->bool:
"""simple docstring"""
__snake_case : List[str] = str(_snake_case )
return n == n[::-1]
def lowercase ( _snake_case : int = 1_000_000 ) ->str:
"""simple docstring"""
__snake_case : Union[str, Any] = 0
for i in range(1 , _snake_case ):
if is_palindrome(_snake_case ) and is_palindrome(bin(_snake_case ).split('''b''' )[1] ):
total += i
return total
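
# Quick illustrative check (not part of the original solution): below 20, the
# numbers palindromic in both base 10 and base 2 are 1, 3, 5, 7 and 9.
assert solution(20) == 25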
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 102 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 140 |
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    # Weight norm has to be enabled while the raw weight_g/weight_v tensors are copied over
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
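# Example invocation (illustrative; the script and file names below are
# placeholders, not from the original):
#
#   python convert_hifigan.py \
#       --checkpoint_path generator.ckpt \
#       --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan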
| 140 | 1 |
import argparse
import os
import shutil
from pathlib import Path

import onnx
import torch
from packaging import version
from torch.onnx import export

from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # UNet weights exceed the 2GB protobuf limit
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(1, clip_num_channels, clip_image_size, clip_image_size).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )

    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()

    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
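# Example invocation (illustrative; the script name and model path are
# placeholders, not from the original):
#
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./sd_onnx --opset 14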
| 133 |
"""simple docstring"""
# flake8: noqa
# Lint as: python3
_UpperCAmelCase = [
"""VerificationMode""",
"""Version""",
"""disable_progress_bar""",
"""enable_progress_bar""",
"""is_progress_bar_enabled""",
"""experimental""",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 173 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 54 | """simple docstring"""
def euclidean_distance_sqr(point1, point2) -> float:
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute force: compare every pair of points."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Within the strip, each point only needs to be compared to its 6 neighbours."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts
        )
    ) ** 0.5
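
# Quick illustrative check (not part of the original file): the closest pair
# in this small set is (0, 0) and (1, 1), at distance sqrt(2).
assert abs(closest_pair_of_points([(0, 0), (3, 4), (1, 1)], 3) - 2 ** 0.5) < 1e-9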
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
| 54 | 1 |
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    """simple docstring"""
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """simple docstring"""
    if index == len(sequence):
        print(current_subsequence)
        return

    # Branch 1: skip the element at `index`; branch 2: include it
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
| 14 |
from ...processing_utils import ProcessorMixin


class SpeechT5Processor(ProcessorMixin):
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily swap feature_size so spectrogram targets are padded correctly
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
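# Illustrative usage sketch (not part of the original file):
#
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   inputs = processor(text="Hello, world!", return_tensors="pt")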
| 14 | 1 |
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
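
def _example_usage() -> None:
    # Illustrative sketch (not part of the original file): build a small
    # triangle graph with the helpers above and run both Prim variants.
    graph = [Vertex(i) for i in range(3)]
    connect(graph, 1, 2, 1)
    connect(graph, 2, 3, 2)
    connect(graph, 1, 3, 3)
    print(prim(graph[:], graph[0]))             # [(2, 1), (3, 2)]
    print(list(prim_heap(graph[:], graph[0])))  # [(2, 1), (3, 2)]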
def test_vector() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()

| 369 |
import re
from typing import Callable, List, Optional, Union

import tensorflow as tf


try:
    from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
    from tensorflow.keras.optimizers import Adam


class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }


def create_optimizer(
    init_lr,
    num_train_steps,
    num_warmup_steps,
    min_lr_ratio=0.0,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    adam_clipnorm=None,
    adam_global_clipnorm=None,
    weight_decay_rate=0.0,
    power=1.0,
    include_in_weight_decay=None,
):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
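
def _example_create_optimizer():
    # Illustrative sketch (not part of the original file): an AdamW optimizer
    # with 500 warmup steps and polynomial decay over 10,000 training steps.
    optimizer, lr_schedule = create_optimizer(
        init_lr=5e-5,
        num_train_steps=10_000,
        num_warmup_steps=500,
        weight_decay_rate=0.01,
    )
    return optimizer, lr_schedule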
class GradientAccumulator(object):
    def __init__(self):
        """Initializes the accumulator."""
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))

| 116 | 0 |
"""simple docstring"""
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> str:
if not (isinstance(UpperCAmelCase , UpperCAmelCase ) and isinstance(UpperCAmelCase , UpperCAmelCase )):
raise ValueError('longest_common_substring() takes two strings for inputs' )
snake_case_ = len(UpperCAmelCase )
snake_case_ = len(UpperCAmelCase )
snake_case_ = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
snake_case_ = 0
snake_case_ = 0
for i in range(1 , texta_length + 1 ):
for j in range(1 , texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
snake_case_ = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
snake_case_ = i
snake_case_ = dp[i][j]
return texta[ans_index - ans_length : ans_index]
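
# Quick illustrative check (not part of the original file):
assert longest_common_substring("abcdxyz", "xyzabcd") == "abcd"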
if __name__ == "__main__":
import doctest
doctest.testmod()
| 69 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
# fmt: on


class NllbTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
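# Illustrative usage sketch (not part of the original file): preparing inputs
# for English-to-French translation with the checkpoint named above.
#
#   tokenizer = NllbTokenizer.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   inputs = tokenizer("Hello world", return_tensors="pt")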
| 69 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert'] = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert_fast'] = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_rembert'] = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_rembert'] = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 371 |
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def lowerCamelCase_ ():
_UpperCAmelCase : int = HfArgumentParser(UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = parser.parse_args_into_dataclasses()[0]
_UpperCAmelCase : Union[str, Any] = TensorFlowBenchmark(args=UpperCamelCase__ )
try:
_UpperCAmelCase : Optional[int] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
_UpperCAmelCase : List[Any] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
_UpperCAmelCase : Tuple = ''' '''.join(str(UpperCamelCase__ ).split(''' ''' )[:-1] )
_UpperCAmelCase : int = ''''''
_UpperCAmelCase : List[Any] = eval(str(UpperCamelCase__ ).split(''' ''' )[-1] )
_UpperCAmelCase : int = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
_UpperCAmelCase : Union[str, Any] = full_error_msg + begin_error_msg + str(UpperCamelCase__ )
raise ValueError(UpperCamelCase__ )
benchmark.run()
if __name__ == "__main__":
main()
| 68 | 0 |
"""simple docstring"""
import inspect
import unittest

from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor
class ConvNextV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = ConvNextVaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def prepare_img ( ) -> "Image.Image":
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase):
@cached_property
def lowercase_ ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None
@slow
def lowercase_ ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
__A = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(lowerCAmelCase_ )
__A = self.default_image_processor
__A = prepare_img()
__A = preprocessor(images=lowerCAmelCase_ , return_tensors='pt' ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
__A = model(**lowerCAmelCase_ )
# verify the logits
__A = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
__A = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
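# Added usage sketch (hedged): a plain, non-unittest variant of the integration test
# above, using only public transformers APIs and the same checkpoint. The function
# name and image path are illustrative assumptions, not from the source.
def classify_image_with_convnextv2(image_path: str) -> str:
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

    processor = AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224')
    model = ConvNextV2ForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224').eval()
    inputs = processor(images=Image.open(image_path), return_tensors='pt')
    with torch.no_grad():
        logits = model(**inputs).logits
    # id2label maps the argmax class index to a human-readable ImageNet label
    return model.config.id2label[int(logits.argmax(-1))]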
| 161 | def is_pangram ( input_str :str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    frequency = set()
    # Remove all the whitespace from our sentence
    input_str = input_str.replace(""" """ , """""")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26
def is_pangram_faster ( input_str :str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)
def is_pangram_fastest ( input_str :str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
def benchmark ( ) -> None:
    from timeit import timeit
    setup = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
    print(timeit("""is_pangram()""" , setup=setup))
    print(timeit("""is_pangram_faster()""" , setup=setup))
    print(timeit("""is_pangram_fastest()""" , setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
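# Added sanity sketch (illustrative, not from the source): the three checks must
# agree on any input, so a quick cross-check is cheap.
def demo() -> None:
    for text in ("The quick brown fox jumps over the lazy dog", "Hello world"):
        results = (is_pangram(text), is_pangram_faster(text), is_pangram_fastest(text))
        assert len(set(results)) == 1, "implementations disagree"
        print(f"{text!r} -> {results[0]}")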
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 180 | 0 |
'''simple docstring'''
from __future__ import annotations
from random import choice
def random_pivot (lst: list[int] ) -> int:
    '''simple docstring'''
    return choice(lst )
def kth_number (lst: list[int] , k: int ) -> int:
    '''simple docstring'''
    pivot = random_pivot(lst )
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small ) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small ) < k - 1:
        return kth_number(big , k - len(small ) - 1 )
    # pivot is in elements smaller than k
    else:
        return kth_number(small , k )
if __name__ == "__main__":
import doctest
doctest.testmod()
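# Added usage sketch (illustrative): kth_number is quickselect, expected O(n).
# Note the partition drops values equal to the pivot, so it assumes distinct elements.
if __name__ == "__main__":
    print(kth_number([3, 1, 4, 5, 9], 2))  # -> 3, the 2nd smallest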
| 357 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class a ( unittest.TestCase ):
def __UpperCAmelCase ( self ) -> int:
_a = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(__magic_name__ ) )
def __UpperCAmelCase ( self ) -> Dict:
_a = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(__magic_name__ ) )
def __UpperCAmelCase ( self ) -> Optional[int]:
_a = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__magic_name__ ) )
def __UpperCAmelCase ( self ) -> Optional[Any]:
_a = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(__magic_name__ ) )
def __UpperCAmelCase ( self ) -> str:
_a = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__magic_name__ ) )
def __UpperCAmelCase ( self ) -> List[Any]:
_a = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
_a = 'fp16'
self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
def __UpperCAmelCase ( self ) -> Any:
_a = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
_a = 'fp16'
self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
def __UpperCAmelCase ( self ) -> int:
# pass variant but use the non-variant filenames
_a = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
_a = 'fp16'
self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
def __UpperCAmelCase ( self ) -> List[str]:
_a = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_a = 'fp16'
self.assertFalse(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
_a = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
_a = 'fp16'
self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
def __UpperCAmelCase ( self ) -> List[Any]:
# pass variant but use the non-variant filenames
_a = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
_a = 'fp16'
self.assertTrue(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
def __UpperCAmelCase ( self ) -> List[Any]:
_a = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
_a = 'fp16'
self.assertFalse(is_safetensors_compatible(__magic_name__ , variant=__magic_name__ ) )
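# Added usage sketch (hedged): is_safetensors_compatible can be called directly on a
# repo file listing; it checks that every torch .bin weight has a .safetensors
# counterpart (optionally for a given variant such as "fp16"). Import path matches
# the test module above; the exact signature may differ across diffusers releases.
def _demo_safetensors_check() -> None:
    from diffusers.pipelines.pipeline_utils import is_safetensors_compatible

    filenames = [
        'unet/diffusion_pytorch_model.bin',
        'unet/diffusion_pytorch_model.safetensors',
    ]
    print(is_safetensors_compatible(filenames))  # expected: True
    print(is_safetensors_compatible(['unet/diffusion_pytorch_model.bin']))  # expected: False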
| 104 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE :List[str] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :List[Any] = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class A_ ( lowerCamelCase_ ):
_lowerCamelCase : Dict = 'roberta'
def __init__( self : Optional[Any] , snake_case_ : Dict=5_0_2_6_5 , snake_case_ : Union[str, Any]=7_6_8 , snake_case_ : Dict=1_2 , snake_case_ : List[Any]=1_2 , snake_case_ : List[str]=3_0_7_2 , snake_case_ : str="gelu" , snake_case_ : Any=0.1 , snake_case_ : Optional[Any]=0.1 , snake_case_ : List[str]=5_1_2 , snake_case_ : Dict=2 , snake_case_ : str=0.0_2 , snake_case_ : int=1e-12 , snake_case_ : int=1 , snake_case_ : List[Any]=0 , snake_case_ : str=2 , snake_case_ : Optional[int]="absolute" , snake_case_ : List[Any]=True , snake_case_ : Union[str, Any]=None , **snake_case_ : List[Any] , ):
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = position_embedding_type
_UpperCAmelCase = use_cache
_UpperCAmelCase = classifier_dropout
class A_ ( lowerCamelCase_ ):
@property
def lowercase ( self : Tuple ):
if self.task == "multiple-choice":
_UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
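# Added usage sketch (hedged): a PretrainedConfig subclass like the one above
# round-trips through dict/JSON; shown with the public RobertaConfig export from
# transformers, on the assumption that this file is that module.
def _demo_config_roundtrip() -> None:
    from transformers import RobertaConfig

    config = RobertaConfig(num_hidden_layers=6, hidden_size=384)
    restored = RobertaConfig.from_dict(config.to_dict())
    assert restored.hidden_size == 384 and restored.num_hidden_layers == 6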
| 22 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class a__ ( lowerCamelCase_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : str = ReformerTokenizer
_SCREAMING_SNAKE_CASE : List[Any] = ReformerTokenizerFast
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : Union[str, Any] = False
_SCREAMING_SNAKE_CASE : Optional[Any] = True
def _lowerCamelCase ( self ):
"""simple docstring"""
super().setUp()
_lowercase : Union[str, Any] = ReformerTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[Any] = "<s>"
_lowercase : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase ) , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(_UpperCamelCase ) , 1000 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _lowerCamelCase ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_lowercase : List[str] = self.get_tokenizer()
_lowercase : int = self.get_rust_tokenizer()
_lowercase : Optional[Any] = "I was born in 92000, and this is falsé."
_lowercase : Union[str, Any] = tokenizer.tokenize(_UpperCamelCase )
_lowercase : int = rust_tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
_lowercase : Tuple = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
_lowercase : Any = rust_tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
_lowercase : List[str] = self.get_rust_tokenizer()
_lowercase : Optional[int] = tokenizer.encode(_UpperCamelCase )
_lowercase : Any = rust_tokenizer.encode(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def _lowerCamelCase ( self , _UpperCamelCase=15 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
# Simple input
_lowercase : Union[str, Any] = "This is a simple input"
_lowercase : Union[str, Any] = ["This is a simple input 1", "This is a simple input 2"]
_lowercase : Any = ("This is a simple input", "This is a pair")
_lowercase : List[str] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(_UpperCamelCase , tokenizer_r.encode , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Simple input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Simple input
self.assertRaises(
_UpperCamelCase , tokenizer_r.batch_encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" , )
# Pair input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Pair input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Pair input
self.assertRaises(
_UpperCamelCase , tokenizer_r.batch_encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" , )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Optional[Any] = ReformerTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase )
_lowercase : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [285, 46, 10, 170, 382] , )
_lowercase : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_lowercase : str = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_lowercase : Optional[int] = tokenizer.convert_ids_to_tokens(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def _lowerCamelCase ( self ):
"""simple docstring"""
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : int = "Hello World!"
_lowercase : Optional[Any] = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase ) )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Optional[int] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
_lowercase : Dict = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase ) )
@require_torch
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
_lowercase : Any = list(self.big_tokenizer.get_vocab().keys() )[:10]
_lowercase : Tuple = " ".join(_UpperCamelCase )
_lowercase : str = self.big_tokenizer.encode_plus(_UpperCamelCase , return_tensors="pt" )
_lowercase : List[Any] = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
_lowercase : Optional[Any] = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
_lowercase : str = encoded_sequence["input_ids"].shape
_lowercase : Dict = ReformerModel(_UpperCamelCase )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_UpperCamelCase )
model(**_UpperCamelCase )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
_lowercase : Optional[int] = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=_UpperCamelCase , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=_UpperCamelCase , sequences=_UpperCamelCase , )
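# Added usage sketch (hedged): the same encode call the slow test checks above,
# exercised outside the test harness with the identical checkpoint.
def _demo_reformer_tokenizer() -> None:
    from transformers import ReformerTokenizer

    tok = ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment')
    ids = tok.encode('Hello World!')
    print(ids)             # [126, 32, 262, 152, 38, 72, 287] per the test above
    print(tok.decode(ids))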
| 250 | 0 |
from __future__ import annotations
import requests
UpperCamelCase__ =set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase = 1, __lowerCamelCase = "new", __lowerCamelCase = None ):
_SCREAMING_SNAKE_CASE : Optional[int] = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(__lowerCamelCase ) - valid_terms ) ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = f"""Invalid search term: {invalid_search_terms}"""
raise ValueError(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = requests.get(
f"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""", headers={"User-agent": "A random string"}, )
if response.status_code == 429:
raise requests.HTTPError
_SCREAMING_SNAKE_CASE : Optional[int] = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(__lowerCamelCase )}
_SCREAMING_SNAKE_CASE : int = {}
for id_ in range(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = {
item: data["data"]["children"][id_]["data"][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext'])) | 325 |
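# Added side note (illustrative, not from the source): the walrus-operator guard in
# get_subreddit_data is equivalent to this two-step form, which may read more clearly.
def _validate_terms(wanted_data: list[str], valid_terms: set[str]) -> None:
    invalid = ', '.join(sorted(set(wanted_data) - valid_terms))
    if invalid:
        raise ValueError(f'Invalid search term: {invalid}')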
import numpy as np
import datasets
UpperCamelCase__ ='\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
UpperCamelCase__ ='\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
UpperCamelCase__ ='\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> int:
# convert to numpy arrays
_SCREAMING_SNAKE_CASE : Dict = np.array(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = np.array(__lowerCamelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
_SCREAMING_SNAKE_CASE : Any = X - np.mean(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.cov(reference_distribution.T )
try:
_SCREAMING_SNAKE_CASE : Optional[int] = np.linalg.inv(__lowerCamelCase )
except np.linalg.LinAlgError:
_SCREAMING_SNAKE_CASE : List[str] = np.linalg.pinv(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = np.dot(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = np.dot(__lowerCamelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist} | 325 | 1 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : Any , __lowercase : Optional[int] , __lowercase : List[Any] ):
'''simple docstring'''
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for a, b in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertAlmostEqual(__lowerCamelCase , __lowerCamelCase , delta=__lowerCamelCase )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(__lowerCamelCase ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
__a = None
ops.enable_eager_execution_internal()
__a = tf.config.list_physical_devices("""CPU""" )
if len(__lowerCamelCase ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
__a = tf.config.list_logical_devices(device_type="""CPU""" )
__a = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
__a = GradientAccumulator()
__a = tf.Variable([4.0, 3.0] )
__a = create_optimizer(5E-5 , 10 , 5 )
__a = tf.Variable([0.0, 0.0] , trainable=__lowerCamelCase )
def accumulate_on_replica(__lowercase : List[str] ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(__lowercase : Optional[int] , __lowercase : List[str] ):
with strategy.scope():
__a = strategy.experimental_local_results(__lowerCamelCase )
local_variables[0].assign(__lowerCamelCase )
local_variables[1].assign(__lowerCamelCase )
strategy.run(__lowerCamelCase , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(__lowerCamelCase )
def _check_local_values(__lowercase : List[str] , __lowercase : List[str] ):
__a = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , __lowerCamelCase , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , __lowerCamelCase , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
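# Added usage sketch (hedged): the accumulation pattern the first test exercises.
# GradientAccumulator is imported exactly as in the test module above; note the API
# may differ or be absent in newer transformers releases.
def _demo_accumulator() -> None:
    import tensorflow as tf
    from transformers import GradientAccumulator

    acc = GradientAccumulator()
    for grad in ([1.0, 2.0], [-2.0, 1.0]):
        acc([tf.constant(grad)])
    print(acc.step)                  # 2
    print(acc.gradients[0].numpy())  # [-1.  3.]
    acc.reset()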
| 302 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0.2 , __lowerCamelCase=0.2) -> str:
_A : Optional[int] = bp_numa
_A : Dict = bp_numa
_A : Tuple = bp_numa
_A : List[str] = conva_get[:2]
_A : Tuple = conva_get[2]
_A : Optional[int] = size_pa
_A : Optional[Any] = rate_w
_A : Optional[Any] = rate_t
_A : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
_A : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
_A : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
_A : Any = -2 * np.random.rand(self.conva[1]) + 1
_A : Optional[int] = -2 * np.random.rand(self.num_bpa) + 1
_A : Optional[Any] = -2 * np.random.rand(self.num_bpa) + 1
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
# save model dict with pickle
_A : Dict = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(__lowerCamelCase , "wb") as f:
pickle.dump(__lowerCamelCase , __lowerCamelCase)
print(F"Model saved: {save_path}")
@classmethod
def _lowerCamelCase ( cls , __lowerCamelCase) -> Any:
# read saved model
with open(__lowerCamelCase , "rb") as f:
_A : Any = pickle.load(__lowerCamelCase) # noqa: S301
_A : Optional[int] = model_dic.get("conv1")
conv_get.append(model_dic.get("step_conv1"))
_A : str = model_dic.get("size_pooling1")
_A : List[str] = model_dic.get("num_bp1")
_A : Union[str, Any] = model_dic.get("num_bp2")
_A : List[Any] = model_dic.get("num_bp3")
_A : Dict = model_dic.get("rate_weight")
_A : List[Any] = model_dic.get("rate_thre")
# create model instance
_A : str = CNN(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# modify model parameter
_A : List[Any] = model_dic.get("w_conv1")
_A : Union[str, Any] = model_dic.get("wkj")
_A : str = model_dic.get("vji")
_A : List[str] = model_dic.get("thre_conv1")
_A : Optional[Any] = model_dic.get("thre_bp2")
_A : Dict = model_dic.get("thre_bp3")
return conv_ins
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
return 1 / (1 + np.exp(-1 * x))
def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]:
return round(__lowerCamelCase , 3)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Union[str, Any]:
# convolution process
_A : Tuple = convs[0]
_A : Union[str, Any] = convs[1]
_A : List[Any] = np.shape(__lowerCamelCase)[0]
# get the data slice of original image data, data_focus
_A : Tuple = []
for i_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase):
for j_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase):
_A : Optional[int] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__lowerCamelCase)
        # calculate the feature map of every single kernel, and save it as a list of matrices
_A : Optional[Any] = []
_A : Optional[int] = int((size_data - size_conv) / conv_step + 1)
for i_map in range(__lowerCamelCase):
_A : Optional[int] = []
for i_focus in range(len(__lowerCamelCase)):
_A : Any = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(__lowerCamelCase))
_A : Optional[Any] = np.asmatrix(__lowerCamelCase).reshape(
__lowerCamelCase , __lowerCamelCase)
data_featuremap.append(__lowerCamelCase)
        # expand the data slice to one dimension
_A : Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__lowerCamelCase))
_A : Dict = np.asarray(__lowerCamelCase)
return focus_list, data_featuremap
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase="average_pool") -> Dict:
# pooling process
_A : Optional[Any] = len(featuremaps[0])
_A : str = int(size_map / size_pooling)
_A : Optional[int] = []
for i_map in range(len(__lowerCamelCase)):
_A : int = featuremaps[i_map]
_A : Optional[int] = []
for i_focus in range(0 , __lowerCamelCase , __lowerCamelCase):
for j_focus in range(0 , __lowerCamelCase , __lowerCamelCase):
_A : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__lowerCamelCase))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__lowerCamelCase))
_A : Tuple = np.asmatrix(__lowerCamelCase).reshape(__lowerCamelCase , __lowerCamelCase)
featuremap_pooled.append(__lowerCamelCase)
return featuremap_pooled
def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple:
        # expand three-dimensional data to a one-dimensional list
_A : Tuple = []
for i in range(len(__lowerCamelCase)):
_A : Union[str, Any] = np.shape(data[i])
_A : List[Any] = data[i].reshape(1 , shapes[0] * shapes[1])
_A : Optional[Any] = data_listed.getA().tolist()[0]
data_expanded.extend(__lowerCamelCase)
_A : Optional[Any] = np.asarray(__lowerCamelCase)
return data_expanded
def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]:
        # expand a matrix to a one-dimensional list
_A : List[Any] = np.asarray(__lowerCamelCase)
_A : Union[str, Any] = np.shape(__lowerCamelCase)
_A : Dict = data_mat.reshape(1 , shapes[0] * shapes[1])
return data_expanded
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]:
_A : Dict = []
_A : Any = 0
for i_map in range(__lowerCamelCase):
_A : Union[str, Any] = np.ones((size_map, size_map))
for i in range(0 , __lowerCamelCase , __lowerCamelCase):
for j in range(0 , __lowerCamelCase , __lowerCamelCase):
_A : List[Any] = pd_pool[
i_pool
]
_A : Tuple = i_pool + 1
_A : Optional[Any] = np.multiply(
__lowerCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
pd_all.append(__lowerCamelCase)
return pd_all
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=bool) -> Union[str, Any]:
        # model training
print("----------------------Start Training-------------------------")
print((" - - Shape: Train_Data ", np.shape(__lowerCamelCase)))
print((" - - Shape: Teach_Data ", np.shape(__lowerCamelCase)))
_A : Tuple = 0
_A : Dict = []
_A : Optional[Any] = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
_A : Union[str, Any] = 0
print(F"-------------Learning Time {rp}--------------")
for p in range(len(__lowerCamelCase)):
# print('------------Learning Image: %d--------------'%p)
_A : str = np.asmatrix(datas_train[p])
_A : Union[str, Any] = np.asarray(datas_teach[p])
_A , _A : Any = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : Optional[Any] = self.pooling(__lowerCamelCase , self.size_poolinga)
_A : Optional[int] = np.shape(__lowerCamelCase)
_A : List[str] = self._expand(__lowerCamelCase)
_A : Tuple = data_bp_input
_A : int = np.dot(__lowerCamelCase , self.vji.T) - self.thre_bpa
_A : List[Any] = self.sig(__lowerCamelCase)
_A : Union[str, Any] = np.dot(__lowerCamelCase , self.wkj.T) - self.thre_bpa
_A : List[str] = self.sig(__lowerCamelCase)
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
_A : int = np.multiply(
(data_teach - bp_outa) , np.multiply(__lowerCamelCase , (1 - bp_outa)))
_A : Optional[Any] = np.multiply(
np.dot(__lowerCamelCase , self.wkj) , np.multiply(__lowerCamelCase , (1 - bp_outa)))
_A : Union[str, Any] = np.dot(__lowerCamelCase , self.vji)
_A : Any = pd_i_all / (self.size_poolinga * self.size_poolinga)
_A : Dict = pd_conva_pooled.T.getA().tolist()
_A : Optional[Any] = self._calculate_gradient_from_pool(
__lowerCamelCase , __lowerCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
_A : int = self._expand_mat(pd_conva_all[k_conv])
_A : Optional[int] = self.rate_weight * np.dot(__lowerCamelCase , __lowerCamelCase)
_A : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
_A : Any = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
                # fully connected layers
_A : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
_A : int = self.vji + pd_j_all.T * bp_outa * self.rate_weight
_A : Tuple = self.thre_bpa - pd_k_all * self.rate_thre
_A : List[str] = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the summed error of the current image
_A : Optional[int] = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
_A : Any = rp + 1
_A : Dict = error_count / patterns
all_mse.append(__lowerCamelCase)
def draw_error():
_A : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(__lowerCamelCase , "+-")
plt.plot(__lowerCamelCase , "r--")
plt.xlabel("Learning Times")
plt.ylabel("All_mse")
plt.grid(__lowerCamelCase , alpha=0.5)
plt.show()
print("------------------Training Complished---------------------")
print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}"))
if draw_e:
draw_error()
return mse
def _lowerCamelCase ( self , __lowerCamelCase) -> int:
# model predict
_A : Union[str, Any] = []
print("-------------------Start Testing-------------------------")
print((" - - Shape: Test_Data ", np.shape(__lowerCamelCase)))
for p in range(len(__lowerCamelCase)):
_A : int = np.asmatrix(datas_test[p])
_A , _A : List[Any] = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : str = self.pooling(__lowerCamelCase , self.size_poolinga)
_A : Optional[int] = self._expand(__lowerCamelCase)
_A : List[Any] = data_bp_input
_A : Optional[int] = bp_outa * self.vji.T - self.thre_bpa
_A : int = self.sig(__lowerCamelCase)
_A : int = bp_outa * self.wkj.T - self.thre_bpa
_A : Optional[int] = self.sig(__lowerCamelCase)
produce_out.extend(bp_outa.getA().tolist())
_A : int = [list(map(self.do_round , __lowerCamelCase)) for each in produce_out]
return np.asarray(__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
        # return the image data after the convolution process so we can inspect it
_A : Optional[int] = np.asmatrix(__lowerCamelCase)
_A , _A : Tuple = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : Union[str, Any] = self.pooling(__lowerCamelCase , self.size_poolinga)
return data_conveda, data_pooleda
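# Added standalone sketch of the average-pooling step implemented in the pooling
# method above: slide a non-overlapping size_pooling x size_pooling window over the
# feature map and average each block. Function name is illustrative.
def average_pool(feature_map: np.ndarray, size_pooling: int) -> np.ndarray:
    n = len(feature_map) // size_pooling
    pooled = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            block = feature_map[
                i * size_pooling : (i + 1) * size_pooling,
                j * size_pooling : (j + 1) * size_pooling,
            ]
            pooled[i, j] = np.average(block)
    return pooled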
if __name__ == "__main__":
pass
| 11 | 0 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class __lowerCamelCase ( unittest.TestCase , a__ ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> str:
_a = load_tool('''text-classification''' )
self.tool.setup()
_a = load_tool('''text-classification''' , remote=__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> str:
_a = self.tool('''That\'s quite cool''' , ['''positive''', '''negative'''] )
self.assertEqual(__UpperCAmelCase , '''positive''' )
def _UpperCAmelCase ( self ) -> Tuple:
_a = self.remote_tool('''That\'s quite cool''' , ['''positive''', '''negative'''] )
self.assertEqual(__UpperCAmelCase , '''positive''' )
def _UpperCAmelCase ( self ) -> Tuple:
_a = self.tool(text='''That\'s quite cool''' , labels=['''positive''', '''negative'''] )
self.assertEqual(__UpperCAmelCase , '''positive''' )
def _UpperCAmelCase ( self ) -> List[Any]:
_a = self.remote_tool(text='''That\'s quite cool''' , labels=['''positive''', '''negative'''] )
self.assertEqual(__UpperCAmelCase , '''positive''' ) | 153 |
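# Added usage sketch (hedged): the same tool the tests load, used directly. The
# label set is illustrative; load_tool is imported exactly as in the test module.
def _demo_text_classification_tool() -> None:
    from transformers import load_tool

    classifier = load_tool('text-classification')
    classifier.setup()
    print(classifier("That's quite cool", ['positive', 'negative']))  # -> 'positive'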
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __lowerCamelCase ( a__ ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = tempfile.mkdtemp()
_a = 8
# DPR tok
_a = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_a = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
_a = os.path.join(__UpperCAmelCase , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
_a = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_a = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
_a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_a = {'''unk_token''': '''<unk>'''}
_a = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
_a = os.path.join(__UpperCAmelCase , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
_a = os.path.join(__UpperCAmelCase , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__UpperCAmelCase ) )
def _UpperCAmelCase ( self ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def _UpperCAmelCase ( self ) -> DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def _UpperCAmelCase ( self ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def _UpperCAmelCase ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self ) -> str:
_a = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = self.get_dummy_dataset()
_a = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_a = dataset
_a = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> int:
_a = self.get_dummy_dataset()
_a = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
_a = os.path.join(self.tmpdirname , '''dataset''' )
_a = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
_a = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
_a = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __UpperCAmelCase ) , )
return retriever
def _UpperCAmelCase ( self ) -> int:
_a = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
_a = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
_a = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
_a = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(__UpperCAmelCase , open(__UpperCAmelCase , '''wb''' ) )
_a = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
_a = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def _UpperCAmelCase ( self ) -> int:
_a = 1
_a = self.get_dummy_canonical_hf_index_retriever()
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a , _a , _a = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _UpperCAmelCase ( self ) -> List[Any]:
_a = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_a = self.get_dummy_dataset()
retriever.save_pretrained(__UpperCAmelCase )
_a = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def _UpperCAmelCase ( self ) -> Dict:
_a = 1
_a = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a , _a , _a = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _UpperCAmelCase ( self ) -> int:
_a = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
_a = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def _UpperCAmelCase ( self ) -> Any:
_a = 1
_a = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a , _a , _a = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _UpperCAmelCase ( self ) -> Tuple:
_a = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
_a = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def _UpperCAmelCase ( self ) -> List[str]:
_a = 1
_a = self.get_dummy_legacy_index_retriever()
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a , _a , _a = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
_a = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def _UpperCAmelCase ( self ) -> Any:
import torch
_a = 1
_a = self.get_dummy_canonical_hf_index_retriever()
_a = [[5, 7], [10, 11]]
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever(__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase )
_a , _a , _a = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
_a = retriever(
__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase , return_tensors='''pt''' , )
_a , _a , _a , _a = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def _UpperCAmelCase ( self ) -> List[Any]:
_a = self.get_dpr_ctx_encoder_tokenizer()
_a = 1
_a = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
retriever.set_ctx_encoder_tokenizer(__UpperCAmelCase )
_a = [[5, 7], [10, 11]]
_a = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_a = retriever(__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase )
self.assertEqual(
            len(__UpperCAmelCase ) , 6 ) # check whether the retriever output consists of 6 attributes, including tokenized docs
self.assertEqual(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , __UpperCAmelCase ) # check for doc-token-related keys in the dictionary.
| 153 | 1 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
# NOTE: identifier names in this block were reconstructed from the surrounding
# references; the method names follow the standard transformers Pipeline API.
class ReturnType(enum.Enum):
    '''simple docstring'''
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS )
class Text2TextGenerationPipeline(Pipeline ):
    '''simple docstring'''
    return_name = '''generated'''
    def __init__(self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == """tf"""
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
    def _sanitize_parameters(self , return_tensors=None , return_type=None , clean_up_tokenization_spaces=None , truncation=None , stop_sequence=None , **generate_kwargs , ):
        """simple docstring"""
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["""truncation"""] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["""return_type"""] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["""clean_up_tokenization_spaces"""] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    """Stopping on a multiple-token sequence is not yet supported in transformers. The first token of"""
                    """ the stop sequence will be used as the stop sequence string in the interim.""" )
            generate_kwargs["""eos_token_id"""] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self , input_length: int , min_length: int , max_length: int ):
        """simple docstring"""
        return True
    def _parse_and_tokenize(self , *args , truncation ):
        """simple docstring"""
        prefix = self.model.config.prefix if self.model.config.prefix is not None else """"""
        if isinstance(args[0] , list ):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0] , str ):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f''' `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`''' )
        inputs = self.tokenizer(*args , padding=padding , truncation=truncation , return_tensors=self.framework )
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__(self , *args , **kwargs ):
        """simple docstring"""
        result = super().__call__(*args , **kwargs )
        if (
            isinstance(args[0] , list )
            and all(isinstance(el , str ) for el in args[0] )
            and all(len(res ) == 1 for res in result )
        ):
            return [res[0] for res in result]
        return result
    def preprocess(self , inputs , truncation=TruncationStrategy.DO_NOT_TRUNCATE , **kwargs ):
        """simple docstring"""
        inputs = self._parse_and_tokenize(inputs , truncation=truncation , **kwargs )
        return inputs
    def _forward(self , model_inputs , **generate_kwargs ):
        """simple docstring"""
        if self.framework == "pt":
            in_b , input_length = model_inputs["""input_ids"""].shape
        elif self.framework == "tf":
            in_b , input_length = tf.shape(model_inputs["""input_ids"""] ).numpy()
        generate_kwargs["""min_length"""] = generate_kwargs.get("""min_length""" , self.model.config.min_length )
        generate_kwargs["""max_length"""] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
        self.check_inputs(input_length , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b , out_b // in_b , *output_ids.shape[1:] )
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids , (in_b, out_b // in_b, *output_ids.shape[1:]) )
        return {"output_ids": output_ids}
    def postprocess(self , model_outputs , return_type=ReturnType.TEXT , clean_up_tokenization_spaces=False ):
        """simple docstring"""
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f'''{self.return_name}_token_ids''': output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f'''{self.return_name}_text''': self.tokenizer.decode(
                        output_ids , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
                }
            records.append(record )
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS )
class SummarizationPipeline(Text2TextGenerationPipeline ):
    '''simple docstring'''
    return_name = '''summary'''
    def __call__(self , *args , **kwargs ):
        """simple docstring"""
        return super().__call__(*args , **kwargs )
    def check_inputs(self , input_length: int , min_length: int , max_length: int ) -> bool:
        """simple docstring"""
        if max_length < min_length:
            logger.warning(f'''Your min_length={min_length} must be smaller than your max_length={max_length}.''' )
        if input_length < max_length:
            logger.warning(
                f'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
                """a summarization task, where outputs shorter than the input are typically wanted, you might """
                f'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TranslationPipeline(Text2TextGenerationPipeline ):
    '''simple docstring'''
    return_name = '''translation'''
    def check_inputs(self , input_length: int , min_length: int , max_length: int ):
        """simple docstring"""
        if input_length > 0.9 * max_length:
            logger.warning(
                f'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
                """increasing your max_length manually, e.g. translator('...', max_length=400)""" )
        return True
    def _parse_and_tokenize(self , *args , truncation=TruncationStrategy.DO_NOT_TRUNCATE , src_lang=None , tgt_lang=None ):
        """simple docstring"""
        if getattr(self.tokenizer , """_build_translation_inputs""" , None ):
            return self.tokenizer._build_translation_inputs(
                *args , return_tensors=self.framework , truncation=truncation , src_lang=src_lang , tgt_lang=tgt_lang )
        else:
            return super()._parse_and_tokenize(*args , truncation=truncation )
    def _sanitize_parameters(self , src_lang=None , tgt_lang=None , **kwargs ):
        """simple docstring"""
        preprocess_params , forward_params , postprocess_params = super()._sanitize_parameters(**kwargs )
        if src_lang is not None:
            preprocess_params["""src_lang"""] = src_lang
        if tgt_lang is not None:
            preprocess_params["""tgt_lang"""] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("""task""" , self.task )
            items = task.split("""_""" )
            if task and len(items ) == 4:
                # translation, XX, to YY
                preprocess_params["""src_lang"""] = items[1]
                preprocess_params["""tgt_lang"""] = items[3]
        return preprocess_params, forward_params, postprocess_params
    def __call__(self , *args , **kwargs ):
        """simple docstring"""
        return super().__call__(*args , **kwargs )
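# Worked example of the task-string convention handled in _sanitize_parameters
# above (editor's note): "translation_en_to_fr".split("_") yields
# ["translation", "en", "to", "fr"], so items[1] is the source language and
# items[3] the target language.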
| 305 |
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    """Fetch the current quote for ``symbol`` from Yahoo Finance."""
    url = f'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
    soup = BeautifulSoup(requests.get(url ).text , """html.parser""" )
    class_ = """My(6px) Pos(r) smartphone_Mt(6px)"""
    return soup.find("""div""" , class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(F'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
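# Editor's note: the CSS class above is tied to Yahoo Finance's page markup at
# the time of writing and is likely to drift; treat this scraper as a sketch
# rather than a stable API.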
| 305 | 1 |
from __future__ import annotations
def allocation_num(number_of_bytes: int , partitions: int ) -> list[str]:
    """Split ``number_of_bytes`` into ``partitions`` contiguous, 1-indexed byte ranges."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!" )
    if partitions > number_of_bytes:
        raise ValueError("partitions can not be greater than number_of_bytes!" )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(F"""{start_bytes}-{end_bytes}""" )
    return allocation_list
if __name__ == "__main__":
    import doctest
    doctest.testmod()
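    # Illustrative call (editor's sketch; expected output worked out by hand):
    # allocation_num(100, 4) -> ['1-25', '26-50', '51-75', '76-100']
    print(allocation_num(100, 4))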
| 351 |
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("""transformers.models.speecht5""")
def load_weights(checkpoint , hf_model , config ):
    # NOTE: the target attribute names below were lost in this copy and are
    # reconstructed assuming the SpeechT5HifiGan module layout
    # (conv_pre / upsampler / resblocks / conv_post).
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[F"""upsamples.{i}.1.weight_g"""]
        hf_model.upsampler[i].weight_v.data = checkpoint[F"""upsamples.{i}.1.weight_v"""]
        hf_model.upsampler[i].bias.data = checkpoint[F"""upsamples.{i}.1.bias"""]
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_g"""]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_v"""]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F"""blocks.{i}.convs1.{j}.1.bias"""]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_g"""]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_v"""]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F"""blocks.{i}.convs2.{j}.1.bias"""]
    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechT5HifiGanConfig()
    model = SpeechT5HifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint["model"]["generator"] , model , config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("Pushing to the hub..." )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
    parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
    )
    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
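# Example invocation (editor's sketch; the script name and paths are placeholders):
# python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
#     --pytorch_dump_folder_path ./speecht5_hifigan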
| 140 | 0 |
'''simple docstring'''
def sum_of_divisors(input_num: int ) -> int:
    """Return the sum of the proper divisors of ``input_num``."""
    if not isinstance(input_num , int ):
        raise ValueError("Input must be an integer" )
    if input_num <= 0:
        raise ValueError("Input must be positive" )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
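    # Illustrative check (editor's sketch): 28 is a perfect number, so the sum
    # of its proper divisors equals the number itself.
    print(sum_of_divisors(28))  # 28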
| 85 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 61 | 0 |
"""simple docstring"""
def is_power_of_two(number: int ) -> bool:
    """simple docstring"""
    if number < 0:
        raise ValueError("number must not be negative" )
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
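    # Illustrative checks (editor's sketch). Note the classic caveat of the
    # bit trick: 0 & (0 - 1) == 0, so 0 is also reported as a power of two.
    print([is_power_of_two(n) for n in (0, 1, 2, 3, 8)])  # [True, True, True, False, True]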
| 150 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
TGT = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def test_disaggregated_scores_are_deterministic():
    """simple docstring"""
    # Function names and boolean flags reconstructed from the test semantics;
    # the original literals were lost in this copy.
    no_aggregation = calculate_rouge(PRED , TGT , bootstrap_aggregation=False , rouge_keys=["rouge2", "rougeL"] )
    assert isinstance(no_aggregation , defaultdict )
    no_aggregation_just_r2 = calculate_rouge(PRED , TGT , bootstrap_aggregation=False , rouge_keys=["rouge2"] )
    assert (
        pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"] ).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    """simple docstring"""
    k = "rougeLsum"
    score = calculate_rouge(PRED , TGT , newline_sep=True , rouge_keys=[k] )[k]
    score_no_sep = calculate_rouge(PRED , TGT , newline_sep=False , rouge_keys=[k] )[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    """simple docstring"""
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED , TGT , newline_sep=True , rouge_keys=k )
    score_no_sep = calculate_rouge(PRED , TGT , newline_sep=False , rouge_keys=k )
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    """simple docstring"""
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred , tgt , newline_sep=True ) == calculate_rouge(pred , tgt , newline_sep=False )
def test_pegasus_newline():
    """simple docstring"""
    pred = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    tgt = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    prev_score = calculate_rouge(pred , tgt , rouge_keys=["rougeLsum"] , newline_sep=False )["rougeLsum"]
    new_score = calculate_rouge(pred , tgt , rouge_keys=["rougeLsum"] )["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli():
    """simple docstring"""
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro" )
    metrics = calculate_rouge_path(data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) )
    assert isinstance(metrics , dict )
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) , bootstrap_aggregation=False )
    assert isinstance(metrics_default_dict , defaultdict )
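# Context for the newline tests above (editor's note): rougeLsum expects one
# sentence per line, so joining sentences with "\n" (newline_sep=True) changes
# its value, while rouge1/rouge2/rougeL are insensitive to the separator.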
| 150 | 1 |
from manim import *
class __A ( a ):
"""simple docstring"""
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =Rectangle(height=0.5 , width=0.5 )
__UpperCamelCase : Dict =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__UpperCamelCase : Union[str, Any] =[mem.copy() for i in range(6 )]
__UpperCamelCase : List[str] =[mem.copy() for i in range(6 )]
__UpperCamelCase : int =VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__UpperCamelCase : int =VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__UpperCamelCase : List[Any] =VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__UpperCamelCase : str =Text('CPU' , font_size=24 )
__UpperCamelCase : Dict =Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =[mem.copy() for i in range(4 )]
__UpperCamelCase : List[str] =VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__UpperCamelCase : str =Text('GPU' , font_size=24 )
__UpperCamelCase : List[str] =Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase__ )
__UpperCamelCase : Optional[int] =[mem.copy() for i in range(6 )]
__UpperCamelCase : Dict =VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__UpperCamelCase : Optional[int] =Text('Model' , font_size=24 )
__UpperCamelCase : Tuple =Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase__ )
__UpperCamelCase : List[Any] =[]
for i, rect in enumerate(lowerCamelCase__ ):
rect.set_stroke(lowerCamelCase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
__UpperCamelCase : List[Any] =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCamelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowerCamelCase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowerCamelCase__ , buff=0.0 )
self.add(lowerCamelCase__ )
cpu_targs.append(lowerCamelCase__ )
__UpperCamelCase : Tuple =[mem.copy() for i in range(6 )]
__UpperCamelCase : List[str] =VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__UpperCamelCase : List[str] =Text('Loaded Checkpoint' , font_size=24 )
__UpperCamelCase : Union[str, Any] =Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , aligned_edge=lowerCamelCase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
__UpperCamelCase : List[Any] =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__UpperCamelCase : List[str] =MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Tuple =MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(lowerCamelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
__UpperCamelCase : int =MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase__ ) , Write(lowerCamelCase__ ) )
self.play(Write(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) )
__UpperCamelCase : str =[]
__UpperCamelCase : Tuple =[]
for i, rect in enumerate(lowerCamelCase__ ):
__UpperCamelCase : List[Any] =fill.copy().set_fill(lowerCamelCase__ , opacity=0.7 )
target.move_to(lowerCamelCase__ )
first_animations.append(GrowFromCenter(lowerCamelCase__ , run_time=1 ) )
__UpperCamelCase : Dict =target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowerCamelCase__ , run_time=1.5 ) )
self.play(*lowerCamelCase__ )
self.play(*lowerCamelCase__ )
self.wait()
| 71 |
def largest_square_area_in_matrix_top_down_approach(rows: int , cols: int , mat: list[list[int]] ) -> int:
    # Function and variable names reconstructed; the original identifiers were
    # lost in this copy (the duplicate parameter names were a syntax error).
    def update_area_of_max_square(row: int , col: int ) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row , col + 1 )
        diagonal = update_area_of_max_square(row + 1 , col + 1 )
        down = update_area_of_max_square(row + 1 , col )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    update_area_of_max_square(0 , 0 )
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int , cols: int , mat: list[list[int]] ) -> int:
    def update_area_of_max_square_using_dp_array(
        row: int , col: int , dp_array: list[list[int]] ) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row , col + 1 , dp_array )
        diagonal = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , dp_array )
        down = update_area_of_max_square_using_dp_array(row + 1 , col , dp_array )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows )]
    update_area_of_max_square_using_dp_array(0 , 0 , dp_array )
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int , cols: int , mat: list[list[int]] ) -> int:
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1 )]
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            down = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right , diagonal , down )
                largest_square_area = max(dp_array[row][col] , largest_square_area )
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_with_space_optimization(rows: int , cols: int , mat: list[list[int]] ) -> int:
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            down = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right , diagonal , down )
                largest_square_area = max(current_row[col] , largest_square_area )
            else:
                current_row[col] = 0
        next_row = current_row[:]  # copy, not alias, so next_row keeps holding row + 1
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
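    # Illustrative cross-check (editor's sketch): all four implementations
    # should agree; the largest all-ones square below is 2 x 2.
    mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    for func in (
        largest_square_area_in_matrix_top_down_approach,
        largest_square_area_in_matrix_top_down_approach_with_dp,
        largest_square_area_in_matrix_bottom_up,
        largest_square_area_in_matrix_bottom_up_with_space_optimization,
    ):
        print(func.__name__, func(3, 3, mat))  # each prints 2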
| 71 | 1 |
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 25_6047
RO_CODE = 25_6145
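# EN_CODE / RO_CODE are the NLLB vocabulary ids of the eng_Latn and ron_Latn
# language codes used by the integration tests below.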
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
__snake_case = NllbTokenizer
__snake_case = NllbTokenizerFast
__snake_case = True
__snake_case = True
__snake_case = {}
def UpperCamelCase__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = NllbTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ):
snake_case_ = NllbTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
snake_case_ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
snake_case_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
snake_case_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
snake_case_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def UpperCamelCase__ ( self ):
snake_case_ = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
snake_case_ = self.tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
snake_case_ = tempfile.mkdtemp()
snake_case_ = tokenizer_r.save_pretrained(_UpperCAmelCase )
snake_case_ = tokenizer_p.save_pretrained(_UpperCAmelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
snake_case_ = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(_UpperCAmelCase , _UpperCAmelCase )
# Checks everything loads correctly in the same way
snake_case_ = tokenizer_r.from_pretrained(_UpperCAmelCase )
snake_case_ = tokenizer_p.from_pretrained(_UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCAmelCase , _UpperCAmelCase ) )
shutil.rmtree(_UpperCAmelCase )
# Save tokenizer rust, legacy_format=True
snake_case_ = tempfile.mkdtemp()
snake_case_ = tokenizer_r.save_pretrained(_UpperCAmelCase , legacy_format=_UpperCAmelCase )
snake_case_ = tokenizer_p.save_pretrained(_UpperCAmelCase )
# Checks it save with the same files
self.assertSequenceEqual(_UpperCAmelCase , _UpperCAmelCase )
# Checks everything loads correctly in the same way
snake_case_ = tokenizer_r.from_pretrained(_UpperCAmelCase )
snake_case_ = tokenizer_p.from_pretrained(_UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCAmelCase , _UpperCAmelCase ) )
shutil.rmtree(_UpperCAmelCase )
# Save tokenizer rust, legacy_format=False
snake_case_ = tempfile.mkdtemp()
snake_case_ = tokenizer_r.save_pretrained(_UpperCAmelCase , legacy_format=_UpperCAmelCase )
snake_case_ = tokenizer_p.save_pretrained(_UpperCAmelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case_ = tokenizer_r.from_pretrained(_UpperCAmelCase )
snake_case_ = tokenizer_p.from_pretrained(_UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCAmelCase , _UpperCAmelCase ) )
shutil.rmtree(_UpperCAmelCase )
@require_torch
def UpperCamelCase__ ( self ):
        if not self.test_seq2seq:
return
snake_case_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Longer text that will definitely require truncation.
snake_case_ = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'''
''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'''
''' will only worsen the violence and misery for millions of people.''',
]
snake_case_ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'''
''' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'''
''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
try:
                    snake_case_ = tokenizer.prepare_seq2seq_batch(
src_texts=_UpperCAmelCase , tgt_texts=_UpperCAmelCase , max_length=3 , max_target_length=10 , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
                snake_case_ = tokenizer.prepare_seq2seq_batch(
_UpperCAmelCase , tgt_texts=_UpperCAmelCase , max_length=3 , return_tensors='''pt''' )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
                snake_case_ = tokenizer.prepare_seq2seq_batch(
src_texts=_UpperCAmelCase , max_length=3 , max_target_length=10 , return_tensors='''pt''' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn('''decoder_input_ids''' , _UpperCAmelCase )
@unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''' )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ = [AddedToken('''<special>''' , lstrip=_UpperCAmelCase )]
snake_case_ = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase )
snake_case_ = tokenizer_r.encode('''Hey this is a <special> token''' )
snake_case_ = tokenizer_r.encode('''<special>''' , add_special_tokens=_UpperCAmelCase )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
snake_case_ = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
snake_case_ = self.tokenizer_class.from_pretrained(
_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase )
snake_case_ = tokenizer_p.encode('''Hey this is a <special> token''' )
snake_case_ = tokenizer_cr.encode('''Hey this is a <special> token''' )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
__snake_case = "facebook/nllb-200-distilled-600M"
__snake_case = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
__snake_case = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
__snake_case = [
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
@classmethod
def UpperCamelCase__ ( cls ):
snake_case_ = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' )
snake_case_ = 1
return cls
def UpperCamelCase__ ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''] , 25_60_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''] , 25_60_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''] , 25_60_57 )
def UpperCamelCase__ ( self ):
snake_case_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase )
def UpperCamelCase__ ( self ):
self.assertIn(_UpperCAmelCase , self.tokenizer.all_special_ids )
# fmt: off
snake_case_ = [RO_CODE, 42_54, 9_80_68, 11_29_23, 3_90_72, 39_09, 7_13, 10_27_67, 26, 1_73_14, 3_56_42, 1_46_83, 3_31_18, 20_22, 6_69_87, 2, 25_60_47]
# fmt: on
snake_case_ = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
snake_case_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , _UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , _UpperCAmelCase )
snake_case_ = 10
snake_case_ = self.tokenizer(_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , _UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
def UpperCamelCase__ ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_62_03, 3] )
def UpperCamelCase__ ( self ):
snake_case_ = tempfile.mkdtemp()
snake_case_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_UpperCAmelCase )
snake_case_ = NllbTokenizer.from_pretrained(_UpperCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _UpperCAmelCase )
@require_torch
def UpperCamelCase__ ( self ):
snake_case_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
snake_case_ = shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['''ron_Latn'''] )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
snake_case_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCamelCase__ ( self ):
snake_case_ = self.tokenizer(self.src_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=3 , return_tensors='''pt''' )
snake_case_ = self.tokenizer(
text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=10 , return_tensors='''pt''' )
snake_case_ = targets['''input_ids''']
snake_case_ = shift_tokens_right(
_UpperCAmelCase , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCamelCase__ ( self ):
snake_case_ = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , {
# A, test, EOS, en_XX
'''input_ids''': [[25_60_47, 70, 73_56, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_60_57,
} , )
@require_torch
def UpperCamelCase__ ( self ):
snake_case_ = True
snake_case_ = self.tokenizer(
'''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids , [1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2, 25_60_47] )
snake_case_ = False
snake_case_ = self.tokenizer(
'''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
self.assertEqual(
            inputs.input_ids , [25_60_47, 1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2] )
| 267 |
import numpy as np
import datasets
_DESCRIPTION = """
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""
_CITATION = """\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
"""
_KWARGS_DESCRIPTION = """
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric ):
    '''simple docstring'''
    # Class, method, and variable names reconstructed; the originals were lost
    # in this copy. _info/_compute follow the datasets.Metric API.
    def _info(self ):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ),
                } ) , )
    def _compute(self , X , reference_distribution ):
        # convert to numpy arrays
        X = np.array(X )
        reference_distribution = np.array(reference_distribution )
        # Assert that arrays are 2D
        if len(X.shape ) != 2:
            raise ValueError('''Expected `X` to be a 2D vector''' )
        if len(reference_distribution.shape ) != 2:
            raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution )
        covariance = np.cov(reference_distribution.T )
        try:
            inv_covariance = np.linalg.inv(covariance )
        except np.linalg.LinAlgError:
            inv_covariance = np.linalg.pinv(covariance )
        left_term = np.dot(X_minus_mu , inv_covariance )
        mahal_dist = np.dot(left_term , X_minus_mu.T ).diagonal()
        return {"mahalanobis": mahal_dist}
| 267 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    """configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_bloom"""] = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
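# The sys.modules swap above is the standard transformers lazy-import pattern:
# submodules listed in _import_structure are only imported on first attribute
# access, so the torch-backed classes load lazily.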
| 140 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="""relu""")
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="""relu"""))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Step 3 - Flattening
    classifier.add(layers.Flatten())
    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="""relu"""))
    classifier.add(layers.Dense(units=1, activation="""sigmoid"""))
    # Compiling the CNN
    classifier.compile(
        optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""]
    )
    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        """dataset/training_set""", target_size=(64, 64), batch_size=32, class_mode="""binary"""
    )
    test_set = test_datagen.flow_from_directory(
        """dataset/test_set""", target_size=(64, 64), batch_size=32, class_mode="""binary"""
    )
    # fit_generator is deprecated in modern TF; Model.fit accepts generators directly.
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save("""cnn.h5""")
    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        """dataset/single_prediction/image.png""", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = """Normal"""
    if result[0][0] == 1:
        prediction = """Abnormality detected"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    # NOTE: function and variable names, and the config attribute targets,
    # were lost in this copy; they are reconstructed from DPTConfig's
    # documented fields and should be treated as an assumption.
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = """huggingface/label-files"""
        filename = """ade20k-id2label.json"""
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="""dataset""" ) ) , """r""" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("""pretrained.model""" , """dpt.encoder""" )
    if "pretrained.model" in name:
        name = name.replace("""pretrained.model""" , """dpt.embeddings""" )
    if "patch_embed" in name:
        name = name.replace("""patch_embed""" , """patch_embeddings""" )
    if "pos_embed" in name:
        name = name.replace("""pos_embed""" , """position_embeddings""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "proj" in name and "project" not in name:
        name = name.replace("""proj""" , """projection""" )
    if "blocks" in name:
        name = name.replace("""blocks""" , """layer""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "scratch.output_conv" in name:
        name = name.replace("""scratch.output_conv""" , """head""" )
    if "scratch" in name:
        name = name.replace("""scratch""" , """neck""" )
    if "layer1_rn" in name:
        name = name.replace("""layer1_rn""" , """convs.0""" )
    if "layer2_rn" in name:
        name = name.replace("""layer2_rn""" , """convs.1""" )
    if "layer3_rn" in name:
        name = name.replace("""layer3_rn""" , """convs.2""" )
    if "layer4_rn" in name:
        name = name.replace("""layer4_rn""" , """convs.3""" )
    if "refinenet" in name:
        layer_idx = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}" , f"fusion_stage.layers.{abs(layer_idx-4 )}" )
    if "out_conv" in name:
        name = name.replace("""out_conv""" , """projection""" )
    if "resConfUnit1" in name:
        name = name.replace("""resConfUnit1""" , """residual_layer1""" )
    if "resConfUnit2" in name:
        name = name.replace("""resConfUnit2""" , """residual_layer2""" )
    if "conv1" in name:
        name = name.replace("""conv1""" , """convolution1""" )
    if "conv2" in name:
        name = name.replace("""conv2""" , """convolution2""" )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
    if "pretrained" in name:
        name = name.replace("""pretrained""" , """dpt""" )
    if "bn" in name:
        name = name.replace("""bn""" , """batch_norm""" )
    if "head" in name:
        name = name.replace("""head""" , """head.head""" )
    if "encoder.norm" in name:
        name = name.replace("""encoder.norm""" , """layernorm""" )
    if "auxlayer" in name:
        name = name.replace("""auxlayer""" , """auxiliary_head.head""" )
    return name
def read_in_q_k_v(state_dict , config):
    # NOTE: the destination key names below were lost in this copy and are
    # reconstructed following the HF DPT naming scheme (query/key/value under
    # attention.attention); treat them as an assumption.
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name):
    config, expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if """ade""" in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if """ade""" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors="""pt""" )
    # forward pass
    outputs = model(**encoding ).logits if """ade""" in checkpoint_url else model(**encoding ).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
    assert outputs.shape == torch.Size(expected_shape )
    assert (
        torch.allclose(outputs[0, 0, :3, :3] , expected_slice , atol=1e-4 )
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3] , expected_slice )
    )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing model to hub...""" )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you\'re pushing to the hub.""",
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
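# Example invocation (the script file name is an assumption; the flags and the
# default checkpoint URL come from the argparse definition above):
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large \
#       --model_name dpt-large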
| 358 |
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None
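# A short usage sketch of the list defined above (positions are 1-based, as
# implemented in insert_at_position):
def _linked_list_demo() -> None:
    linked_list = LinkedList()
    for value in (1, 2, 4):
        linked_list.insert(value)
    linked_list.insert_at_position(3, 3)  # value 3 at position 3 -> 1 2 3 4
    assert str(linked_list) == "1 2 3 4"
    assert 3 in linked_list
    linked_list.delete_value(4)
    assert str(linked_list) == "1 2 3"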
def _snake_case( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 285 | 0 |
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def UpperCAmelCase__ (lowerCAmelCase_ = "" ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
__SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(lowerCAmelCase_ ).text , "html.parser" )
__SCREAMING_SNAKE_CASE = soup.find_all("td" , attrs="titleColumn" )
__SCREAMING_SNAKE_CASE = soup.find_all("td" , class_="ratingColumn imdbRating" )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(lowerCAmelCase_ , lowerCAmelCase_ )
}
def UpperCAmelCase__ (lowerCAmelCase_ = "IMDb_Top_250_Movies.csv" ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = get_imdb_top_aaa_movies()
with open(lowerCAmelCase_ , "w" , newline="" ) as out_file:
__SCREAMING_SNAKE_CASE = csv.writer(lowerCAmelCase_ )
writer.writerow(["Movie title", "IMDb rating"] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
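# Illustrative shape of the mapping returned by get_imdb_top_250_movies()
# (example values only; real titles and ratings depend on IMDb's live chart,
# and the scraper relies on IMDb's legacy "titleColumn" markup):
#   {"The Shawshank Redemption": 9.3, "The Godfather": 9.2, ...}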
| 54 |
"""simple docstring"""
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of ``value`` or, if ``deriv`` is True and ``value``
    is already a sigmoid output, the derivative at that point."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100
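# One hand-checked step of the loop above (a sketch of the arithmetic): with
# weight = 1, layer_1 = sigmoid(0.02) ~= 0.505. For expected = 70 the error is
# 0.70 - 0.505 = 0.195, the delta is 0.195 * 0.505 * (1 - 0.505) ~= 0.0487, and
# the weight grows by 0.02 * 0.0487 ~= 0.001; repeated, layer_1 * 100 drifts
# toward the expected value.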
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
| 54 | 1 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="""%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s""",
datefmt="""%Y-%m-%d %H:%M:%S""",
level=os.environ.get("""LOGLEVEL""", """INFO""").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()

    return args
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="cpu" ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = model_dict[model_name].from_pretrained(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = tokenizer_dict[model_name].from_pretrained(_SCREAMING_SNAKE_CASE )
if model_name in ["facebook/bart-base"]:
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = 0
return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
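# Example invocation (assuming this script is saved as run_onnx_exporter.py, as
# in the transformers BART ONNX example; the flags come from parse_args above):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 --output_file_path BART.onnx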
| 362 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
'''simple docstring'''
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
    def test_is_control(self):
'''simple docstring'''
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
    def test_is_punctuation(self):
'''simple docstring'''
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
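        # For bert-base-uncased, id 101 is [CLS] and id 102 is [SEP]: a single
        # sequence is encoded as [CLS] tokens [SEP] and a pair as
        # [CLS] tokens_a [SEP] tokens_b [SEP].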
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens["""offset_mapping"""] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
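# A minimal greedy longest-match-first sketch of the WordPiece algorithm the
# tests above exercise (illustrative only; the real WordpieceTokenizer also
# handles max_input_chars_per_word and other edge cases):
def _wordpiece_sketch(word: str, vocab: set, unk: str = "[UNK]") -> list:
    tokens = []
    start = 0
    while start < len(word):
        end = len(word)
        current = None
        while start < end:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                current = piece
                break
            end -= 1
        if current is None:
            return [unk]  # one unmatchable span marks the whole word unknown
        tokens.append(current)
        start = end
    return tokens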
| 193 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])


class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height


def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    # promote the left child; used for the left-left imbalance
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    # promote the right child; used for the right-right imbalance
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)


def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node


def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()


def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root


class AVLtree:
    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level-order traversal gives a more intuitive look at the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
        print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
        print(str(t))
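# A quick balance sanity check (a sketch, separate from the shuffled demo
# above): inserting 1..15 in ascending order would degenerate a plain BST to
# height 15, while the AVL rotations keep the height logarithmic.
def _balance_demo() -> None:
    tree = AVLtree()
    for value in range(1, 16):
        tree.insert(value)
    assert tree.get_height() <= 5  # 15 nodes -> height bounded by ~1.44*log2(n)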
| 56 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    """
    Find the area of the grid whose rectangle count is closest to ``target``.
    An a x b grid contains T(a) * T(b) rectangles, where T(k) is the k-th
    triangle number, so the search runs over pairs of triangle numbers.
    """
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
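# Why triangle numbers: an m x n grid contains T(m) * T(n) rectangles, where
# T(k) = k * (k + 1) / 2 counts the ways to choose two distinct grid lines on
# each axis. Quick check against the example from the problem statement:
def _rectangle_count(m: int, n: int) -> int:
    return (m * (m + 1) // 2) * (n * (n + 1) // 2)


assert _rectangle_count(3, 2) == 18  # a 3 x 2 grid contains 18 rectangles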
if __name__ == "__main__":
print(F"""{solution() = }""")
| 116 | 0 |
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
'''simple docstring'''
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
    def test_seq2seq_dataset_truncation(self, tok_name):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(snake_case_ )
UpperCAmelCase_ : Optional[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
UpperCAmelCase_ : Union[str, Any] = max(len(tokenizer.encode(snake_case_ ) ) for a in ARTICLES )
UpperCAmelCase_ : str = max(len(tokenizer.encode(snake_case_ ) ) for a in SUMMARIES )
UpperCAmelCase_ : str = 4
UpperCAmelCase_ : Dict = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
UpperCAmelCase_ : Any = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error.
UpperCAmelCase_ : List[Any] = SeqaSeqDataset(
snake_case_ , data_dir=snake_case_ , type_path="train" , max_source_length=snake_case_ , max_target_length=snake_case_ , src_lang=snake_case_ , tgt_lang=snake_case_ , )
UpperCAmelCase_ : Dict = DataLoader(snake_case_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(snake_case_ , snake_case_ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
UpperCAmelCase_ : Dict = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def test_legacy_dataset_truncation(self, tok):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(snake_case_ )
UpperCAmelCase_ : str = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
UpperCAmelCase_ : Any = max(len(tokenizer.encode(snake_case_ ) ) for a in ARTICLES )
UpperCAmelCase_ : Any = max(len(tokenizer.encode(snake_case_ ) ) for a in SUMMARIES )
UpperCAmelCase_ : Dict = 4
UpperCAmelCase_ : Optional[int] = LegacySeqaSeqDataset(
snake_case_ , data_dir=snake_case_ , type_path="train" , max_source_length=20 , max_target_length=snake_case_ , )
UpperCAmelCase_ : Optional[Any] = DataLoader(snake_case_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
    def test_pack_dataset(self):
"""simple docstring"""
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
UpperCAmelCase_ : Any = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
UpperCAmelCase_ : Optional[int] = tmp_dir.joinpath("train.source" ).open().readlines()
UpperCAmelCase_ : List[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(snake_case_ , snake_case_ , 128 , snake_case_ )
UpperCAmelCase_ : Optional[Any] = {x.name for x in tmp_dir.iterdir()}
UpperCAmelCase_ : Union[str, Any] = {x.name for x in save_dir.iterdir()}
UpperCAmelCase_ : str = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(snake_case_ ) < len(snake_case_ )
assert len(snake_case_ ) == 1
assert len(packed_examples[0] ) == sum(len(snake_case_ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" )
    def test_dynamic_batch_size(self):
"""simple docstring"""
if not FAIRSEQ_AVAILABLE:
return
UpperCAmelCase_ : Any = self._get_dataset(max_len=64 )
UpperCAmelCase_ : int = 64
UpperCAmelCase_ : List[str] = ds.make_dynamic_sampler(snake_case_ , required_batch_size_multiple=snake_case_ )
UpperCAmelCase_ : List[str] = [len(snake_case_ ) for x in batch_sampler]
assert len(set(snake_case_ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(snake_case_ ) == len(snake_case_ ) # no dropped or added examples
UpperCAmelCase_ : Union[str, Any] = DataLoader(snake_case_ , batch_sampler=snake_case_ , collate_fn=ds.collate_fn , num_workers=2 )
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : Union[str, Any] = []
for batch in data_loader:
UpperCAmelCase_ : Any = batch['''input_ids'''].shape
UpperCAmelCase_ : str = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
UpperCAmelCase_ : Optional[Any] = np.product(batch["input_ids"].shape )
num_src_per_batch.append(snake_case_ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(snake_case_ )
assert num_src_per_batch[0] == max(snake_case_ )
if failures:
raise AssertionError(F"""too many tokens in {len(snake_case_ )} batches""" )
    def test_sortish_sampler_reduces_padding(self):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self._get_dataset(max_len=512 )
UpperCAmelCase_ : Union[str, Any] = 2
UpperCAmelCase_ : str = ds.make_sortish_sampler(snake_case_ , shuffle=snake_case_ )
UpperCAmelCase_ : Tuple = DataLoader(snake_case_ , batch_size=snake_case_ , collate_fn=ds.collate_fn , num_workers=2 )
UpperCAmelCase_ : Tuple = DataLoader(snake_case_ , batch_size=snake_case_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=snake_case_ )
UpperCAmelCase_ : Optional[int] = tokenizer.pad_token_id
def count_pad_tokens(lowercase_ , lowercase_="input_ids" ):
return [batch[k].eq(snake_case_ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(snake_case_ , k="labels" ) ) < sum(count_pad_tokens(snake_case_ , k="labels" ) )
assert sum(count_pad_tokens(snake_case_ ) ) < sum(count_pad_tokens(snake_case_ ) )
assert len(snake_case_ ) == len(snake_case_ )
    def _get_dataset(self, n_obs=1000, max_len=128):
"""simple docstring"""
if os.getenv("USE_REAL_DATA" , snake_case_ ):
UpperCAmelCase_ : Optional[int] = '''examples/seq2seq/wmt_en_ro'''
UpperCAmelCase_ : List[Any] = max_len * 2 * 64
if not Path(snake_case_ ).joinpath("train.len" ).exists():
save_len_file(snake_case_ , snake_case_ )
else:
UpperCAmelCase_ : int = '''examples/seq2seq/test_data/wmt_en_ro'''
UpperCAmelCase_ : List[str] = max_len * 4
save_len_file(snake_case_ , snake_case_ )
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(snake_case_ )
UpperCAmelCase_ : Optional[int] = SeqaSeqDataset(
snake_case_ , data_dir=snake_case_ , type_path="train" , max_source_length=snake_case_ , max_target_length=snake_case_ , n_obs=snake_case_ , )
return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
"""simple docstring"""
UpperCAmelCase_ : str = self._get_dataset()
UpperCAmelCase_ : Optional[Any] = set(DistributedSortishSampler(snake_case_ , 256 , num_replicas=2 , rank=0 , add_extra_examples=snake_case_ ) )
UpperCAmelCase_ : Tuple = set(DistributedSortishSampler(snake_case_ , 256 , num_replicas=2 , rank=1 , add_extra_examples=snake_case_ ) )
assert idsa.intersection(snake_case_ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
    def test_dataset_kwargs(self, tok_name):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(snake_case_ , use_fast=snake_case_ )
if tok_name == MBART_TINY:
UpperCAmelCase_ : Any = SeqaSeqDataset(
snake_case_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , )
UpperCAmelCase_ : Tuple = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
UpperCAmelCase_ : Optional[Any] = SeqaSeqDataset(
snake_case_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , )
UpperCAmelCase_ : List[Any] = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(snake_case_ ) == 1 if tok_name == BART_TINY else len(snake_case_ ) == 0
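# Why a "sortish" sampler reduces padding (a standalone sketch, not part of the
# test suite above): batching length-sorted sequences lets every batch pad only
# to its own, smaller maximum length.
def _pad_cost(lengths: list, batch_size: int) -> int:
    wasted = 0
    for i in range(0, len(lengths), batch_size):
        batch = lengths[i : i + batch_size]
        wasted += max(batch) * len(batch) - sum(batch)  # pad tokens added
    return wasted


assert _pad_cost(sorted([3, 97, 5, 88, 7, 91, 2, 99]), 4) < _pad_cost([3, 97, 5, 88, 7, 91, 2, 99], 4)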
| 351 |
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
'''simple docstring'''
    def setup(self) -> None:
"""simple docstring"""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(lowercase_ )
UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowercase_ )
UpperCAmelCase_ : Dict = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowercase_ )
| 23 | 0 |
'''simple docstring'''
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)
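# A small self-check for largest_product (a sketch; in this 4 x 4 toy grid the
# main diagonal's 5 * 5 * 5 * 5 = 625 beats every row and column product of 5):
def _largest_product_demo() -> None:
    demo_grid = [
        [5, 1, 1, 1],
        [1, 5, 1, 1],
        [1, 1, 5, 1],
        [1, 1, 1, 5],
    ]
    assert largest_product(demo_grid) == 625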
if __name__ == "__main__":
print(solution())
| 158 |
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
)
else:
        cluster = rh.cluster(
name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
_SCREAMING_SNAKE_CASE = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 158 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDMaDPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
A__ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , )
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
A__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
A__ = CLIPTextModel(UpperCAmelCase__ )
A__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
A__ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
if str(UpperCAmelCase__ ).startswith("mps" ):
A__ = torch.manual_seed(UpperCAmelCase__ )
else:
A__ = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
A__ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __A ( self ):
A__ = "cpu" # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = StableDiffusionLDMaDPipeline(**UpperCAmelCase__ )
A__ = ldmad_pipe.to(UpperCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
A__ = self.get_dummy_inputs(UpperCAmelCase__ )
A__ = ldmad_pipe(**UpperCAmelCase__ )
A__ , A__ = output.rgb, output.depth
A__ = rgb[0, -3:, -3:, -1]
A__ = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
A__ = np.array(
[0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262] )
A__ = np.array([103.46_727, 85.812_004, 87.849_236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def __A ( self ):
A__ = self.get_dummy_components()
A__ = StableDiffusionLDMaDPipeline(**UpperCAmelCase__ )
A__ = ldmad_pipe.to(UpperCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
A__ = self.get_dummy_inputs(UpperCAmelCase__ )
inputs["prompt"] = 3 * [inputs["prompt"]]
# forward
A__ = ldmad_pipe(**UpperCAmelCase__ )
A__ , A__ = output.rgb, output.depth
A__ = rgb_slice_a[0, -3:, -3:, -1]
A__ = depth_slice_a[0, -3:, -1]
A__ = self.get_dummy_inputs(UpperCAmelCase__ )
A__ = 3 * [inputs.pop("prompt" )]
A__ = ldmad_pipe.tokenizer(
UpperCAmelCase__ , padding="max_length" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=UpperCAmelCase__ , return_tensors="pt" , )
A__ = text_inputs["input_ids"].to(UpperCAmelCase__ )
A__ = ldmad_pipe.text_encoder(UpperCAmelCase__ )[0]
inputs["prompt_embeds"] = prompt_embeds
# forward
A__ = ldmad_pipe(**UpperCAmelCase__ )
A__ , A__ = output.rgb, output.depth
A__ = rgb_slice_b[0, -3:, -3:, -1]
A__ = depth_slice_b[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_b.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_b.flatten() ).max() < 1e-4
def __A ( self ):
A__ = "cpu" # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = PNDMScheduler(skip_prk_steps=UpperCAmelCase__ )
A__ = StableDiffusionLDMaDPipeline(**UpperCAmelCase__ )
A__ = ldmad_pipe.to(UpperCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
A__ = self.get_dummy_inputs(UpperCAmelCase__ )
A__ = "french fries"
A__ = ldmad_pipe(**UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ )
A__ , A__ = output.rgb, output.depth
A__ = rgb[0, -3:, -3:, -1]
A__ = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
A__ = np.array(
[0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315, 0.47_926_217] )
A__ = np.array([107.84_738, 84.62_802, 89.962_135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def __A ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__="cpu" , UpperCAmelCase__=torch.floataa , UpperCAmelCase__=0 ):
A__ = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
A__ = np.random.RandomState(UpperCAmelCase__ ).standard_normal((1, 4, 64, 64) )
A__ = torch.from_numpy(UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
A__ = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def __A ( self ):
A__ = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" )
A__ = ldmad_pipe.to(UpperCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
A__ = self.get_inputs(UpperCAmelCase__ )
A__ = ldmad_pipe(**UpperCAmelCase__ )
A__ , A__ = output.rgb, output.depth
A__ = rgb[0, -3:, -3:, -1].flatten()
A__ = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
A__ = np.array(
[0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706] )
A__ = np.array(
[0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def __A ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__="cpu" , UpperCAmelCase__=torch.floataa , UpperCAmelCase__=0 ):
A__ = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
A__ = np.random.RandomState(UpperCAmelCase__ ).standard_normal((1, 4, 64, 64) )
A__ = torch.from_numpy(UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
A__ = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def __A ( self ):
A__ = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ).to(UpperCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
A__ = self.get_inputs(UpperCAmelCase__ )
A__ = ldmad_pipe(**UpperCAmelCase__ )
A__ , A__ = output.rgb, output.depth
A__ = 0.495_586
A__ = 0.33_795_515
A__ = 112.48_518
A__ = 98.489_746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def __A ( self ):
A__ = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c" ).to(UpperCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
A__ = self.get_inputs(UpperCAmelCase__ )
A__ = ldmad_pipe(**UpperCAmelCase__ )
A__ , A__ = output.rgb, output.depth
A__ = 0.4_194_127
A__ = 0.35_375_586
A__ = 0.5_638_502
A__ = 0.34_686_103
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 358 |
def alternative_string_arrange(first_str: str, second_str: str) -> str:
"""Interleave the characters of two strings; leftover characters from the longer string are appended."""
first_str_length = len(first_str )
second_str_length = len(second_str )
abs_length = (
first_str_length if first_str_length > second_str_length else second_str_length
)
output_list = []
for char_count in range(abs_length ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(output_list )
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
| 198 | 0 |
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCAmelCase (_UpperCAmelCase ):
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCAmelCase_ , """embed_dim""" ) )
self.parent.assertTrue(hasattr(UpperCAmelCase_ , """num_heads""" ) )
class __UpperCAmelCase :
def __init__( self: str , UpperCAmelCase_: Tuple , UpperCAmelCase_: Union[str, Any]=13 , UpperCAmelCase_: List[str]=64 , UpperCAmelCase_: Any=3 , UpperCAmelCase_: Optional[Any]=[16, 48, 96] , UpperCAmelCase_: Union[str, Any]=[1, 3, 6] , UpperCAmelCase_: str=[1, 2, 10] , UpperCAmelCase_: int=[7, 3, 3] , UpperCAmelCase_: Optional[int]=[4, 2, 2] , UpperCAmelCase_: str=[2, 1, 1] , UpperCAmelCase_: List[str]=[2, 2, 2] , UpperCAmelCase_: Tuple=[False, False, True] , UpperCAmelCase_: Optional[int]=[0.0, 0.0, 0.0] , UpperCAmelCase_: List[Any]=0.02 , UpperCAmelCase_: Tuple=1E-12 , UpperCAmelCase_: Optional[Any]=True , UpperCAmelCase_: Optional[int]=True , UpperCAmelCase_: Optional[Any]=2 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = patch_sizes
_SCREAMING_SNAKE_CASE = patch_stride
_SCREAMING_SNAKE_CASE = patch_padding
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = embed_dim
_SCREAMING_SNAKE_CASE = num_heads
_SCREAMING_SNAKE_CASE = stride_kv
_SCREAMING_SNAKE_CASE = depth
_SCREAMING_SNAKE_CASE = cls_token
_SCREAMING_SNAKE_CASE = attention_drop_rate
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = layer_norm_eps
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self: str ):
'''simple docstring'''
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: List[str] , UpperCAmelCase_: str , UpperCAmelCase_: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = CvtModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = (self.image_size, self.image_size)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
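# per-stage output size follows the usual conv formula: floor((size + 2 * padding - kernel) / stride) + 1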
_SCREAMING_SNAKE_CASE = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_SCREAMING_SNAKE_CASE = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: str , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = CvtForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
__snake_case : Dict = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
__snake_case : Dict = (
{"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
if is_torch_available()
else {}
)
__snake_case : Dict = False
__snake_case : Optional[int] = False
__snake_case : Optional[Any] = False
__snake_case : Any = False
__snake_case : Any = False
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = CvtModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37 )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
return
@unittest.skip(reason="""Cvt does not output attentions""" )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: List[Any] ):
_SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = outputs.hidden_states
_SCREAMING_SNAKE_CASE = len(self.model_tester.depth )
self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
pass
@slow
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = CvtModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def __lowerCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __UpperCAmelCase (unittest.TestCase ):
@cached_property
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.default_image_processor
_SCREAMING_SNAKE_CASE = prepare_img()
_SCREAMING_SNAKE_CASE = image_processor(images=UpperCAmelCase_ , return_tensors="""pt""" ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**UpperCAmelCase_ )
# verify the logits
_SCREAMING_SNAKE_CASE = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.tensor([0.92_85, 0.90_15, -0.31_50] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
| 306 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __UpperCAmelCase :
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : torch.Tensor # [batch_size x 3]
__snake_case : int # image width, in pixels
__snake_case : int # image height, in pixels
__snake_case : float # horizontal field of view, in radians
__snake_case : float # vertical field of view, in radians
__snake_case : Tuple[int] # batch shape of the cameras
def UpperCamelCase ( self: str ):
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
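# Enumerate all pixels row-major, splitting each flat index into (x, y):
# x = index % width, y = index // width.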
_SCREAMING_SNAKE_CASE = torch.arange(self.height * self.width )
_SCREAMING_SNAKE_CASE = torch.stack(
[
pixel_indices % self.width,
torch.div(UpperCAmelCase_ , self.width , rounding_mode="""trunc""" ),
] , axis=1 , )
return coords
@property
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE = self.shape
_SCREAMING_SNAKE_CASE = int(np.prod(UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = self.get_image_coords()
_SCREAMING_SNAKE_CASE = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
_SCREAMING_SNAKE_CASE = self.get_camera_rays(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = rays.view(UpperCAmelCase_ , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def UpperCamelCase ( self: Any , UpperCAmelCase_: torch.Tensor ):
'''simple docstring'''
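# Pixel coords are mapped to [-1, 1] image-plane fractions, scaled by tan(fov / 2),
# combined with the camera basis (x, y, z) into normalized world-space directions,
# and each direction is paired with the broadcast camera origin.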
_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_SCREAMING_SNAKE_CASE = coords.view(UpperCAmelCase_ , -1 , 2 )
_SCREAMING_SNAKE_CASE = self.resolution()
_SCREAMING_SNAKE_CASE = self.fov()
_SCREAMING_SNAKE_CASE = (flat.float() / (res - 1)) * 2 - 1
_SCREAMING_SNAKE_CASE = fracs * torch.tan(fov / 2 )
_SCREAMING_SNAKE_CASE = fracs.view(UpperCAmelCase_ , -1 , 2 )
_SCREAMING_SNAKE_CASE = (
self.z.view(UpperCAmelCase_ , 1 , 3 )
+ self.x.view(UpperCAmelCase_ , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(UpperCAmelCase_ , 1 , 3 ) * fracs[:, :, 1:]
)
_SCREAMING_SNAKE_CASE = directions / directions.norm(dim=-1 , keepdim=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.stack(
[
torch.broadcast_to(self.origin.view(UpperCAmelCase_ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(UpperCAmelCase_ , *UpperCAmelCase_ , 2 , 3 )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: int , UpperCAmelCase_: int ):
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=UpperCAmelCase_ , height=UpperCAmelCase_ , x_fov=self.x_fov , y_fov=self.y_fov , )
def __lowerCamelCase ( snake_case__ ) -> DifferentiableProjectiveCamera:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
for theta in np.linspace(0 ,2 * np.pi ,num=20 ):
_SCREAMING_SNAKE_CASE = np.array([np.sin(snake_case__ ), np.cos(snake_case__ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
_SCREAMING_SNAKE_CASE = -z * 4
_SCREAMING_SNAKE_CASE = np.array([np.cos(snake_case__ ), -np.sin(snake_case__ ), 0.0] )
_SCREAMING_SNAKE_CASE = np.cross(snake_case__ ,snake_case__ )
origins.append(snake_case__ )
xs.append(snake_case__ )
ys.append(snake_case__ )
zs.append(snake_case__ )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(snake_case__ ,axis=0 ) ).float() ,x=torch.from_numpy(np.stack(snake_case__ ,axis=0 ) ).float() ,y=torch.from_numpy(np.stack(snake_case__ ,axis=0 ) ).float() ,z=torch.from_numpy(np.stack(snake_case__ ,axis=0 ) ).float() ,width=snake_case__ ,height=snake_case__ ,x_fov=0.7 ,y_fov=0.7 ,shape=(1, len(snake_case__ )) ,)
| 306 | 1 |
'''simple docstring'''
import qiskit
def half_adder( bit0 : int, bit1 : int ):
'''Simulate a quantum half adder for two classical input bits.'''
simulator = qiskit.Aer.get_backend('''aer_simulator''' )
qc_ha = qiskit.QuantumCircuit(4, 2 )
# encode inputs in qubits 0 and 1
if bit0 == 1:
qc_ha.x(0 )
if bit1 == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0, 2 )
qc_ha.cx(1, 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0, 1, 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2, 0 ) # extract XOR value
qc_ha.measure(3, 1 ) # extract AND value
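# Qiskit orders the measured bitstring as '<c1><c0>' = '<carry><sum>',
# so e.g. half_adder(1, 1) yields {'10': 1000} on the noiseless simulator.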
# Execute the circuit on the qasm simulator
job = qiskit.execute(qc_ha, simulator, shots=1_0_0_0 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(qc_ha )
if __name__ == "__main__":
counts = half_adder(1, 1)
print(F'''Half Adder Output Qubit Counts: {counts}''') | 355 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 222 | 0 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_lowercase = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks( args ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
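# The two helpers below freeze embedding weights of the student so they are not
# updated during distillation.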
def freeze_pos_embeddings( student , args ):
if args.student_type == "roberta":
student.roberta.embeddings.position_embeddings.weight.requires_grad = False
elif args.student_type == "gpt2":
student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings( student , args ):
if args.student_type == "roberta":
student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
A = argparse.ArgumentParser(description='Training' )
parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' )
parser.add_argument(
'--dump_path' , type=snake_case__ , required=snake_case__ , help='The output directory (log, checkpoints, parameters, etc.)' )
parser.add_argument(
'--data_file' , type=snake_case__ , required=snake_case__ , help='The binarized file (tokenized + tokens_to_ids), grouped by sequence.' , )
parser.add_argument(
'--student_type' , type=snake_case__ , choices=['distilbert', 'roberta', 'gpt2'] , required=snake_case__ , help='The student type (DistilBERT, RoBERTa).' , )
parser.add_argument('--student_config' , type=snake_case__ , required=snake_case__ , help='Path to the student configuration.' )
parser.add_argument(
'--student_pretrained_weights' , default=snake_case__ , type=snake_case__ , help='Load student initialization checkpoint.' )
parser.add_argument(
'--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=snake_case__ , help='Teacher type (BERT, RoBERTa).' )
parser.add_argument('--teacher_name' , type=snake_case__ , required=snake_case__ , help='The teacher model.' )
parser.add_argument('--temperature' , default=2.0 , type=snake_case__ , help='Temperature for the distillation softmax.' )
parser.add_argument(
'--alpha_ce' , default=0.5 , type=snake_case__ , help='Linear weight for the distillation loss. Must be >=0.' )
parser.add_argument(
'--alpha_mlm' , default=0.0 , type=snake_case__ , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , )
parser.add_argument('--alpha_clm' , default=0.5 , type=snake_case__ , help='Linear weight for the CLM loss. Must be >=0.' )
parser.add_argument('--alpha_mse' , default=0.0 , type=snake_case__ , help='Linear weight of the MSE loss. Must be >=0.' )
parser.add_argument(
'--alpha_cos' , default=0.0 , type=snake_case__ , help='Linear weight of the cosine embedding loss. Must be >=0.' )
parser.add_argument(
'--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' )
parser.add_argument(
'--mlm_mask_prop' , default=0.15 , type=snake_case__ , help='Proportion of tokens for which we need to make a prediction.' , )
parser.add_argument('--word_mask' , default=0.8 , type=snake_case__ , help='Proportion of tokens to mask out.' )
parser.add_argument('--word_keep' , default=0.1 , type=snake_case__ , help='Proportion of tokens to keep.' )
parser.add_argument('--word_rand' , default=0.1 , type=snake_case__ , help='Proportion of tokens to randomly replace.' )
parser.add_argument(
'--mlm_smoothing' , default=0.7 , type=snake_case__ , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , )
parser.add_argument('--token_counts' , type=snake_case__ , help='The token counts in the data_file for MLM.' )
parser.add_argument(
'--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , )
parser.add_argument(
'--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , )
parser.add_argument(
'--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , )
parser.add_argument('--n_epoch' , type=snake_case__ , default=3 , help='Number of pass on the whole dataset.' )
parser.add_argument('--batch_size' , type=snake_case__ , default=5 , help='Batch size (for each process).' )
parser.add_argument(
'--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , )
parser.add_argument(
'--gradient_accumulation_steps' , type=snake_case__ , default=50 , help='Gradient accumulation for larger training batches.' , )
parser.add_argument('--warmup_prop' , default=0.05 , type=snake_case__ , help='Linear warmup proportion.' )
parser.add_argument('--weight_decay' , default=0.0 , type=snake_case__ , help='Weight decay if we apply some.' )
parser.add_argument('--learning_rate' , default=5e-4 , type=snake_case__ , help='The initial learning rate for Adam.' )
parser.add_argument('--adam_epsilon' , default=1e-6 , type=snake_case__ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , default=5.0 , type=snake_case__ , help='Max gradient norm.' )
parser.add_argument('--initializer_range' , default=0.02 , type=snake_case__ , help='Random initialization range.' )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=snake_case__ , default='O1' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_gpu' , type=snake_case__ , default=1 , help='Number of GPUs in the node.' )
parser.add_argument('--local_rank' , type=snake_case__ , default=-1 , help='Distributed training - Local rank' )
parser.add_argument('--seed' , type=snake_case__ , default=56 , help='Random seed' )
parser.add_argument('--log_interval' , type=snake_case__ , default=500 , help='Tensorboard logging interval.' )
parser.add_argument('--checkpoint_interval' , type=snake_case__ , default=4000 , help='Checkpoint interval.' )
A = parser.parse_args()
sanity_checks(snake_case__ )
# ARGS #
init_gpu_params(snake_case__ )
set_seed(snake_case__ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F'Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'
' it. Use `--force` if you want to overwrite it.' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'Experiment will be dumped and logged in {args.dump_path}' )
# SAVE PARAMS #
logger.info(F'Param: {args}' )
with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f:
json.dump(vars(snake_case__ ) , snake_case__ , indent=4 )
git_log(args.dump_path )
A , A , A = MODEL_CLASSES[args.student_type]
A , A , A = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
A = teacher_tokenizer_class.from_pretrained(args.teacher_name )
A = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
A = tokenizer.all_special_tokens.index(snake_case__ )
A = tokenizer.all_special_ids[idx]
logger.info(F'Special tokens {special_tok_ids}' )
A = special_tok_ids
A = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'Loading data from {args.data_file}' )
with open(args.data_file , 'rb' ) as fp:
A = pickle.load(snake_case__ )
if args.mlm:
logger.info(F'Loading token counts from {args.token_counts} (already pre-computed)' )
with open(args.token_counts , 'rb' ) as fp:
A = pickle.load(snake_case__ )
A = np.maximum(snake_case__ , 1 ) ** -args.mlm_smoothing
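# Raising the counts to -mlm_smoothing gives rare tokens a larger masking weight,
# the XLM-style smoothing referred to in the --mlm_smoothing help text.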
for idx in special_tok_ids.values():
A = 0.0 # do not predict special tokens
A = torch.from_numpy(snake_case__ )
else:
A = None
A = LmSeqsDataset(params=snake_case__ , data=snake_case__ )
logger.info('Data loader created.' )
# STUDENT #
logger.info(F'Loading student config from {args.student_config}' )
A = student_config_class.from_pretrained(args.student_config )
A = True
if args.student_pretrained_weights is not None:
logger.info(F'Loading pretrained weights from {args.student_pretrained_weights}' )
A = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case__ )
else:
A = student_model_class(snake_case__ )
if args.n_gpu > 0:
student.to(F'cuda:{args.local_rank}' )
logger.info('Student loaded.' )
# TEACHER #
A = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case__ )
if args.n_gpu > 0:
teacher.to(F'cuda:{args.local_rank}' )
logger.info(F'Teacher loaded from {args.teacher_name}.' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case__ , snake_case__ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case__ , snake_case__ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
A = Distiller(
params=snake_case__ , dataset=snake_case__ , token_probs=snake_case__ , student=snake_case__ , teacher=snake_case__ )
distiller.train()
logger.info('Let\'s go get some drinks.' )
if __name__ == "__main__":
main() | 74 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = StableDiffusionInstructPixaPixPipeline
a_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase ( self : Optional[int] ) -> Optional[int]:
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
__lowerCAmelCase = PNDMScheduler(skip_prk_steps=lowerCAmelCase_ )
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
__lowerCAmelCase = CLIPTextModel(lowerCAmelCase_ )
__lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__lowerCAmelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple=0 ) -> Dict:
__lowerCAmelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
__lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCAmelCase = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert('RGB' )
if str(lowerCAmelCase_ ).startswith('mps' ):
__lowerCAmelCase = torch.manual_seed(lowerCAmelCase_ )
else:
__lowerCAmelCase = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
__lowerCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def lowercase ( self : Tuple ) -> List[Any]:
__lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
__lowerCAmelCase = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase_ )
__lowerCAmelCase = sd_pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__lowerCAmelCase = np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : List[str] ) -> Dict:
__lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
__lowerCAmelCase = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase_ )
__lowerCAmelCase = 'french fries'
__lowerCAmelCase = sd_pipe(**lowerCAmelCase_ , negative_prompt=lowerCAmelCase_ )
__lowerCAmelCase = output.images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__lowerCAmelCase = np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : List[str] ) -> Any:
__lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
__lowerCAmelCase = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase_ )
inputs['prompt'] = [inputs['prompt']] * 2
__lowerCAmelCase = np.array(inputs['image'] ).astype(np.floataa ) / 2_55.0
__lowerCAmelCase = torch.from_numpy(lowerCAmelCase_ ).unsqueeze(0 ).to(lowerCAmelCase_ )
__lowerCAmelCase = image / 2 + 0.5
__lowerCAmelCase = image.permute(0 , 3 , 1 , 2 )
inputs['image'] = image.repeat(2 , 1 , 1 , 1 )
__lowerCAmelCase = sd_pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
__lowerCAmelCase = np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' )
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
__lowerCAmelCase = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase_ )
__lowerCAmelCase = sd_pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = [round(x , 4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(x ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
__lowerCAmelCase = np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : Optional[int] ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase_ )
__lowerCAmelCase = VaeImageProcessor(do_resize=lowerCAmelCase_ , do_normalize=lowerCAmelCase_ )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase_ , input_image_type='pt' ) )[0]
__lowerCAmelCase = components['vae']
__lowerCAmelCase = self.get_dummy_inputs_by_type(lowerCAmelCase_ , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__lowerCAmelCase = vae.encode(inputs[image_param] ).latent_dist.mode()
__lowerCAmelCase = pipe(**lowerCAmelCase_ )[0]
__lowerCAmelCase = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase_ , 1e-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : int ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : List[str] , lowerCAmelCase_ : List[Any]=0 ) -> Any:
__lowerCAmelCase = torch.manual_seed(lowerCAmelCase_ )
__lowerCAmelCase = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
__lowerCAmelCase = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def lowercase ( self : List[Any] ) -> str:
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
__lowerCAmelCase = self.get_inputs()
__lowerCAmelCase = pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCAmelCase = np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase ( self : Tuple ) -> List[str]:
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ )
__lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
__lowerCAmelCase = self.get_inputs()
__lowerCAmelCase = pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCAmelCase = np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase ( self : Optional[Any] ) -> Dict:
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ )
__lowerCAmelCase = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
__lowerCAmelCase = self.get_inputs()
__lowerCAmelCase = pipe(**lowerCAmelCase_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowerCAmelCase = np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase ( self : Optional[int] ) -> int:
__lowerCAmelCase = 0
def callback_fn(lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : torch.FloatTensor ) -> None:
callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__lowerCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
__lowerCAmelCase = latents[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
__lowerCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
__lowerCAmelCase = latents[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
callback_fn.has_been_called = False
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ , torch_dtype=torch.floataa )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
__lowerCAmelCase = self.get_inputs()
pipe(**lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase ( self : Optional[int] ) -> Any:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=lowerCAmelCase_ , torch_dtype=torch.floataa )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowerCAmelCase = self.get_inputs()
__lowerCAmelCase = pipe(**lowerCAmelCase_ )
__lowerCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def lowercase ( self : List[Any] ) -> Any:
__lowerCAmelCase = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
inputs['image'] = inputs['image'].resize((5_0_4, 5_0_4) )
__lowerCAmelCase = 'timbrooks/instruct-pix2pix'
__lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
__lowerCAmelCase = pipe(**lowerCAmelCase_ )
__lowerCAmelCase = output.images[0]
__lowerCAmelCase = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
__lowerCAmelCase = np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 284 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
__UpperCamelCase = ["small", "medium", "large"]
__UpperCamelCase = "lm_head.decoder.weight"
__UpperCamelCase = "lm_head.weight"
def convert_dialogpt_checkpoint( checkpoint_path: str , pytorch_dump_folder_path: str ):
"""Rename the LM-head key of a DialoGPT checkpoint and save it in transformers format."""
d = torch.load(checkpoint_path )
d[NEW_KEY] = d.pop(OLD_KEY )
os.makedirs(pytorch_dump_folder_path , exist_ok=True )
torch.save(d , os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME ) )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--dialogpt_path", default=".", type=str)
__UpperCamelCase = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
checkpoint_path = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
pytorch_dump_folder_path = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 13 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 13 | 1 |
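# Heap's algorithm: generate every permutation of the input by swapping a single
# pair of elements between recursive calls; which pair depends on the parity of k.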
def heaps( arr ):
if len(arr ) <= 1:
return [tuple(arr )]
res = []
def generate(k , arr ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , arr )
for i in range(k - 1 ):
if k % 2 == 0: # k is even: swap the i-th and last elements
arr[k - 1], arr[i] = arr[i], arr[k - 1]
else: # k is odd: swap the first and last elements
arr[0], arr[k - 1] = arr[k - 1], arr[0]
generate(k - 1 , arr )
generate(len(arr ) , arr )
return res
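# e.g. heaps([1, 2, 3]) -> [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]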
if __name__ == "__main__":
user_input = input("Enter numbers separated by a comma:\n").strip()
arr = [int(item) for item in user_input.split(",")]
print(heaps(arr))
| 236 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class __lowerCAmelCase ( unittest.TestCase):
def __init__( self: List[Any] , _lowerCAmelCase: str , _lowerCAmelCase: Dict=7 , _lowerCAmelCase: Optional[Any]=3 , _lowerCAmelCase: List[Any]=18 , _lowerCAmelCase: int=30 , _lowerCAmelCase: Union[str, Any]=4_00 , _lowerCAmelCase: Any=True , _lowerCAmelCase: Union[str, Any]=32 , _lowerCAmelCase: int=True , ):
lowercase :Any = parent
lowercase :List[str] = batch_size
lowercase :Any = num_channels
lowercase :Optional[Any] = image_size
lowercase :Optional[Any] = min_resolution
lowercase :str = max_resolution
lowercase :str = do_resize
lowercase :Optional[int] = size_divisor
lowercase :Tuple = do_rescale
def SCREAMING_SNAKE_CASE ( self: Any ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class __lowerCAmelCase ( lowerCAmelCase , unittest.TestCase):
_a = GLPNImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self: List[str] ):
lowercase :int = GLPNImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE ( self: str ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self: Optional[int] ):
lowercase :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "size_divisor" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "resample" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "do_rescale" ) )
def SCREAMING_SNAKE_CASE ( self: List[Any] ):
pass
def SCREAMING_SNAKE_CASE ( self: Any ):
# Initialize image_processing
lowercase :Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowercase :str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
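# GLPNImageProcessor resizes so that both spatial dims end up as exact multiples
# of size_divisor, which is what the two assertions above verify.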
def SCREAMING_SNAKE_CASE ( self: Dict ):
# Initialize image_processing
lowercase :Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase :Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowercase :Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def SCREAMING_SNAKE_CASE ( self: Tuple ):
# Initialize image_processing
lowercase :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowercase :Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 236 | 1 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A: int = logging.get_logger(__name__)
A: Tuple = {
"microsoft/conditional-detr-resnet-50": (
"https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : int = 'conditional_detr'
__lowerCAmelCase : List[Any] = ['past_key_values']
__lowerCAmelCase : Union[str, Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=300 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="relu" , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="sine" , _SCREAMING_SNAKE_CASE="resnet50" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.25 , **_SCREAMING_SNAKE_CASE , ) -> Tuple:
'''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        '''simple docstring'''
        return self.d_model

    def to_dict(self) -> dict:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 12
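# Illustrative usage (editorial addition; argument values are hypothetical):
# config = ConditionalDetrConfig(num_queries=100, backbone="resnet50")
# config_dict = config.to_dict()  # nested backbone config is serialized as a plain dict
# assert config_dict["model_type"] == "conditional_detr"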
| 76 |
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md(".")
| 76 | 1 |
'''simple docstring'''
from __future__ import annotations
def decrypt_caesar_with_chi_squared(ciphertext: str, cipher_alphabet: list[str] | None = None, frequencies_dict: dict[str, float] | None = None, case_sensitive: bool = False, ) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
'a': 0.0_8_4_9_7,
'b': 0.0_1_4_9_2,
'c': 0.0_2_2_0_2,
'd': 0.0_4_2_5_3,
'e': 0.1_1_1_6_2,
'f': 0.0_2_2_2_8,
'g': 0.0_2_0_1_5,
'h': 0.0_6_0_9_4,
'i': 0.0_7_5_4_6,
'j': 0.0_0_1_5_3,
'k': 0.0_1_2_9_2,
'l': 0.0_4_0_2_5,
'm': 0.0_2_4_0_6,
'n': 0.0_6_7_4_9,
'o': 0.0_7_5_0_7,
'p': 0.0_1_9_2_9,
'q': 0.0_0_0_9_5,
'r': 0.0_7_5_8_7,
's': 0.0_6_3_2_7,
't': 0.0_9_3_5_6,
'u': 0.0_2_7_5_8,
'v': 0.0_0_9_7_8,
'w': 0.0_2_5_6_0,
'x': 0.0_0_1_5_0,
'y': 0.0_1_9_9_4,
'z': 0.0_0_0_7_7,
}
else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0
        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values, key=chi_squared_statistic_values_sorting_key
    )
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
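# Illustrative call (editorial addition, not part of the original module): the
# function returns the best (shift, chi_squared_value, decoded_text) triple for
# a Caesar-shifted input, e.g.
# shift, chi_squared, decoded = decrypt_caesar_with_chi_squared("crybd cdbsxq")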
| 198 | import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="""relu"""))
classifier.add(layers.Dense(units=1, activation="""sigmoid"""))
# Compiling the CNN
classifier.compile(
optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    # fit_generator was removed in modern TensorFlow; Model.fit accepts the
    # generators directly.
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    # The sigmoid output is a probability in [0, 1]; threshold it rather than
    # comparing against exact 0/1 values.
    if result[0][0] >= 0.5:
        prediction = "Abnormality detected"
    else:
        prediction = "Normal"
| 140 | 0 |
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
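    # Expected behaviour, for illustration (editorial addition):
    # snake_to_camel_case("some_random_string") -> "someRandomString"
    # snake_to_camel_case("some_random_string", use_pascal=True) -> "SomeRandomString"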
| 286 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    """simple docstring"""

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_determine_framework_from_local_checkpoint(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_determine_framework_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
| 286 | 1 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """simple docstring"""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
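    # Worked example (editorial addition): for [1, 2, 4, 5] the best non-adjacent
    # selection is 2 + 5, so maximum_non_adjacent_sum([1, 2, 4, 5]) == 7.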
| 342 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: DiagonalGaussianDistribution


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types=("DownEncoderBlock2D",), up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 4, norm_num_groups: int = 32, sample_size: int = 32, scaling_factor: float = 0.18_215, ):
        '''simple docstring'''
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True, )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn, )
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        '''simple docstring'''
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        '''simple docstring'''
        self.use_tiling = use_tiling

    def disable_tiling(self):
        '''simple docstring'''
        self.enable_tiling(False)

    def enable_slicing(self):
        '''simple docstring'''
        self.use_slicing = True

    def disable_slicing(self):
        '''simple docstring'''
        self.use_slicing = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> dict:
        '''simple docstring'''
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors

    def set_attn_processor(self, processor):
        '''simple docstring'''
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        '''simple docstring'''
        self.set_attn_processor(AttnProcessor())
@apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        '''simple docstring'''
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)
        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True):
        '''simple docstring'''
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)
        z = self.post_quant_conv(z)
        dec = self.decoder(z)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True):
        '''simple docstring'''
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded)
    def blend_v(self, a, b, blend_extent):
        '''simple docstring'''
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            # linear cross-fade between the bottom rows of `a` and the top rows of `b`
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        '''simple docstring'''
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            # linear cross-fade between the right columns of `a` and the left columns of `b`
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True):
        '''simple docstring'''
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent
        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))
        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True):
        '''simple docstring'''
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))
        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, sample_posterior: bool = False, return_dict: bool = True, generator=None, ):
        '''simple docstring'''
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
| 330 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
'''GLPN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GLPNForDepthEstimation''',
'''GLPNLayer''',
'''GLPNModel''',
'''GLPNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
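    # Editorial note: with the _LazyModule registration above, the torch-heavy
    # modeling submodules are imported only on first attribute access, e.g.
    # `from transformers.models.glpn import GLPNImageProcessor` triggers the
    # actual import lazily at lookup time.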
| 62 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)
        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])
        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1  # char eos token id
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)
        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
| 62 | 1 |
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    """simple docstring"""
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)
    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
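# Illustration (editorial addition): comment-only and blank lines are stripped
# before hashing, so these two inputs hash identically:
# _hash_python_lines(["x = 1", "# a comment", "", "y = 2"]) == _hash_python_lines(["x = 1", "y = 2"])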
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 267 |
'''simple docstring'''
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(items, max_cost, key_func):
    items_copy = sorted(items, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 267 | 1 |
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
| 355 |
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
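# Example (editorial addition): prompt("Demo", 12, "*") returns '*** Demo ***',
# i.e. the label centred inside a 12-character banner.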
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
    node: TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 5_0 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 99 | 0 |
import requests
from bs4 import BeautifulSoup
def A ( _lowercase = "AAPL" ):
SCREAMING_SNAKE_CASE : str = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
SCREAMING_SNAKE_CASE : Union[str, Any] = BeautifulSoup(requests.get(_lowercase ).text , '''html.parser''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = '''My(6px) Pos(r) smartphone_Mt(6px)'''
return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 182 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 182 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    '''simple docstring'''
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        '''simple docstring'''
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        image_processor_map = {
            'do_normalize': False,
            'do_resize': True,
            'image_processor_type': 'ViTImageProcessor',
            'resample': 3,
            'size': {'height': 32, 'width': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        '''simple docstring'''
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
    def test_save_load_pretrained_default(self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'test'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'test'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'labels'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predictions)
        decoded_tok = tokenizer.batch_decode(predictions)
        decoded_tok = [seq.replace(' ', '') for seq in decoded_tok]
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)
        results = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(list(results.keys()), ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'])
| 361 | '''simple docstring'''
def split(string: str, separator: str = " ") -> list:
    """simple docstring"""
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
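# Example behaviour (editorial addition):
# split("apple#banana#cherry#orange", separator="#")
# -> ['apple', 'banana', 'cherry', 'orange']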
if __name__ == "__main__":
from doctest import testmod
testmod()
| 274 | 0 |
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45--52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line, with all the annotations for that word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4	Word itself	This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6	Parse bit	This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11	Named Entities	This column identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
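# Illustration (added for this description, not part of the metric itself): how the
# columns above line up when one CoNLL line -- taken from the doctest further below --
# is split on whitespace. Only columns 4, 5, 6 and the last one are consumed.
_example_cols = "bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)".split()
assert _example_cols[3] == "you"  # column 4: the word itself
assert _example_cols[4] == "PRP"  # column 5: part-of-speech
assert _example_cols[-1] == "(116)"  # last column: coreference chain annotation
del _example_cols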
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
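# Worked example (illustrative numbers, not from the metric): the CoNLL score
# computed above is the plain average of the MUC, B-cubed and CEAFe F1 values.
# With hypothetical F1s of 0.80, 0.70 and 0.60:
#     conll = (0.80 + 0.70 + 0.60) / 3 * 100 = 70.0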
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
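# Usage sketch (mirrors the doctest in _KWARGS_DESCRIPTION; requires the external
# `coval` package to be installed):
#
#     import datasets
#     coval = datasets.load_metric("coval")
#     results = coval.compute(predictions=[words], references=[words])
#     print(results["conll_score"])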
| 229 | '''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
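# Worked example (added for clarity): tracing one ParlAI key through the renames above.
#   "encoder.layers.0.attention.q_lin.weight"
#     -> after PATTERNS:        "encoder.layers.0.attn.q_proj.weight"
#     -> after the encoder fix: "encoder.layers.0.self_attn.q_proj.weight"
assert rename_state_dict_key("encoder.layers.0.attention.q_lin.weight") == (
    "encoder.layers.0.self_attn.q_proj.weight"
)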
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
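    # Example invocation (file names are placeholders; the script name is whatever
    # this file is saved as):
    #   python convert_blenderbot_checkpoint.py --src_path blenderbot-model.bin \
    #       --save_dir hf_blenderbot --hf_config_json blenderbot-3b-config.json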
| 229 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
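# Usage sketch (illustrative; "kakaobrain/karlo-v1-alpha" is the public unCLIP
# checkpoint, assumed here purely as an example):
#
#     from diffusers import UnCLIPPipeline
#     pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha")
#     image = pipe("a photo of an astronaut").images[0]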
| 189 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.17.0.dev0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
logger = logging.getLogger(__name__)
@dataclass
class a :
_lowercase = field(
default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
_lowercase = field(
default="tab_fact" , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} , )
_lowercase = field(
default=1_0_2_4 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_lowercase = field(
default=UpperCAmelCase , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
_lowercase = field(
default=UpperCAmelCase , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
_lowercase = field(
default=UpperCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
_lowercase = field(
default=UpperCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
_lowercase = field(
default=UpperCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
_lowercase = field(
default=UpperCAmelCase , metadata={"help": "A csv or a json file containing the training data."} )
_lowercase = field(
default=UpperCAmelCase , metadata={"help": "A csv or a json file containing the validation data."} )
_lowercase = field(default=UpperCAmelCase , metadata={"help": "A csv or a json file containing the test data."} )
def _UpperCAmelCase ( self ):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("Need either a GLUE task, a training/validation file or a dataset name." )
else:
_UpperCAmelCase : int = self.train_file.split("." )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
_UpperCAmelCase : Optional[int] = self.validation_file.split("." )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class a :
_lowercase = field(
default=UpperCAmelCase , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
_lowercase = field(
default=UpperCAmelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_lowercase = field(
default=UpperCAmelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
_lowercase = field(
default=UpperCAmelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
_lowercase = field(
default=UpperCAmelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
_lowercase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
_lowercase = field(
default=UpperCAmelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
def __SCREAMING_SNAKE_CASE ( ) -> List[str]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
_UpperCAmelCase : Tuple = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase )
datasets.utils.logging.set_verbosity(lowerCAmelCase )
transformers.utils.logging.set_verbosity(lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
_UpperCAmelCase : List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase : Tuple = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_UpperCAmelCase : Tuple = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
_UpperCAmelCase : int = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
_UpperCAmelCase : Tuple = data_args.train_file.split("." )[-1]
_UpperCAmelCase : Optional[Any] = data_args.test_file.split("." )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
_UpperCAmelCase : Union[str, Any] = data_args.test_file
else:
raise ValueError("Need either a GLUE task or a test file for `do_predict`." )
for key in data_files.keys():
logger.info(F'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith(".csv" ):
# Loading a dataset from local csv files
_UpperCAmelCase : List[str] = load_dataset("csv" , data_files=lowerCAmelCase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
_UpperCAmelCase : Dict = load_dataset("json" , data_files=lowerCAmelCase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
_UpperCAmelCase : List[str] = raw_datasets["train"].features["label"].names
_UpperCAmelCase : Tuple = len(lowerCAmelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
_UpperCAmelCase : Tuple = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowerCAmelCase , )
_UpperCAmelCase : Union[str, Any] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
_UpperCAmelCase : Tuple = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_UpperCAmelCase : List[Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
_UpperCAmelCase : List[Any] = {"Refused": 0, "Entailed": 1}
_UpperCAmelCase : Optional[int] = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
_UpperCAmelCase : int = min(data_args.max_seq_length , tokenizer.model_max_length )
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result
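    # Illustration (not part of the original script): `table_text` holds "#"-separated
    # cells with one row per line and the first row as the header, e.g.
    #     "name#age\nalice#30\nbob#25\n"
    # which _convert_table_text_to_pandas turns into a DataFrame with columns
    # ["name", "age"] and two data rows.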
with training_args.main_process_first(desc="dataset map pre-processing" ):
_UpperCAmelCase : List[Any] = raw_datasets.map(
lowerCAmelCase , batched=lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on dataset" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
_UpperCAmelCase : Dict = raw_datasets["train"]
if data_args.max_train_samples is not None:
_UpperCAmelCase : List[Any] = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
_UpperCAmelCase : Union[str, Any] = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
_UpperCAmelCase : Any = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset" )
_UpperCAmelCase : Dict = raw_datasets["test"]
if data_args.max_predict_samples is not None:
_UpperCAmelCase : Any = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCAmelCase ) ) , 3 ):
logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_UpperCAmelCase : str = default_data_collator
    elif training_args.fp16:
_UpperCAmelCase : int = DataCollatorWithPadding(lowerCAmelCase , pad_to_multiple_of=8 )
else:
_UpperCAmelCase : List[str] = None
# Initialize our Trainer
_UpperCAmelCase : List[Any] = Trainer(
model=lowerCAmelCase , args=lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCAmelCase , tokenizer=lowerCAmelCase , data_collator=lowerCAmelCase , )
# Training
if training_args.do_train:
_UpperCAmelCase : List[Any] = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase : List[str] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase : Dict = last_checkpoint
_UpperCAmelCase : str = trainer.train(resume_from_checkpoint=lowerCAmelCase )
_UpperCAmelCase : Tuple = train_result.metrics
_UpperCAmelCase : Optional[int] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase )
)
_UpperCAmelCase : Any = min(lowerCAmelCase , len(lowerCAmelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" , lowerCAmelCase )
trainer.save_metrics("train" , lowerCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_UpperCAmelCase : Optional[int] = trainer.evaluate(eval_dataset=lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase )
_UpperCAmelCase : Any = min(lowerCAmelCase , len(lowerCAmelCase ) )
trainer.log_metrics("eval" , lowerCAmelCase )
trainer.save_metrics("eval" , lowerCAmelCase )
if training_args.do_predict:
logger.info("*** Predict ***" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
_UpperCAmelCase : int = predict_dataset.remove_columns("label" )
_UpperCAmelCase : Any = trainer.predict(lowerCAmelCase , metric_key_prefix="predict" ).predictions
_UpperCAmelCase : List[str] = np.argmax(lowerCAmelCase , axis=1 )
_UpperCAmelCase : int = os.path.join(training_args.output_dir , "predict_results_tabfact.txt" )
if trainer.is_world_process_zero():
with open(lowerCAmelCase , "w" ) as writer:
logger.info("***** Predict Results *****" )
writer.write("index\tprediction\n" )
for index, item in enumerate(lowerCAmelCase ):
_UpperCAmelCase : List[Any] = label_list[item]
writer.write(F'{index}\t{item}\n' )
_UpperCAmelCase : int = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase )
else:
trainer.create_model_card(**lowerCAmelCase )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Dict ) -> Union[str, Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 189 | 1 |
'''simple docstring'''
def solution(n: int = 4000000) -> int:
    """Sum the even-valued Fibonacci terms that do not exceed ``n``."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
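# Sanity check (Project Euler problem 2): the even Fibonacci numbers not exceeding
# 4,000,000 are 2, 8, 34, ..., 3524578, and solution() returns their sum, 4613732.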
if __name__ == "__main__":
print(f"""{solution() = }""")
| 89 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with the variance-exploding score SDE."""

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
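# Usage sketch (names as reconstructed above; "google/ncsnpp-church-256" is a public
# score-SDE checkpoint, used here purely as an example):
#
#     from diffusers import ScoreSdeVePipeline
#     pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#     image = pipe(num_inference_steps=2000).images[0]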
| 23 | 0 |
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive variant of the Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
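# Worked example (illustrative): euclidean_gcd(252, 105)
#   252 % 105 = 42  -> gcd(105, 42)
#   105 % 42  = 21  -> gcd(42, 21)
#   42  % 21  = 0   -> gcd(21, 0) = 21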
def main() -> None:
print(F"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(F"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(F"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(F"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(F"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(F"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(F"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(F"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
| 350 | from __future__ import annotations
from collections.abc import Iterator
class Node:
    """A binary tree node holding an integer value."""

    def __init__(self, value: int) -> None:
        self.value = value
        self.left = None
        self.right = None


class BinaryTreeNodeSum:
    """Iterable wrapper that yields the sum of all node values in a tree."""

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
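# Usage sketch: summing a three-node tree.
#
#     tree = Node(10)
#     tree.left = Node(5)
#     tree.right = Node(-3)
#     print(next(iter(BinaryTreeNodeSum(tree))))  # 10 + 5 - 3 = 12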
if __name__ == "__main__":
import doctest
doctest.testmod()
| 206 | 0 |
"""simple docstring"""
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = '''naver-clova-ix/donut-base'''


class DonutProcessorTest(unittest.TestCase):
    def setUp(self) -> None:
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self) -> None:
        expected_json = {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }
        sequence = (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json) | 74 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
    blip_2,
bloom,
bridgetower,
    byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
    convnextv2,
cpm,
cpmant,
ctrl,
cvt,
    data2vec,
deberta,
    deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
    gpt2,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
    gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
    longt5,
luke,
lxmert,
    m2m_100,
marian,
markuplm,
    mask2former,
maskformer,
mbart,
    mbart50,
mega,
megatron_bert,
    megatron_gpt2,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
    mobilevitv2,
mpnet,
mra,
    mt5,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
    pix2struct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
    speech_to_text_2,
    speecht5,
splinter,
squeezebert,
swiftformer,
swin,
    swin2sr,
    swinv2,
switch_transformers,
    t5,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
    umt5,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 68 | 0 |
"""simple docstring"""
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    """Smallest index in v[l..r] whose value is >= key (binary search)."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """Length of the longest strictly increasing subsequence, in O(n log n)."""
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value: it starts a fresh candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces its ceiling element to keep the tails as small as possible
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
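# Usage sketch: longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
# returns 6; one longest increasing subsequence is [2, 3, 7, 8, 10, 13].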
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class lowerCAmelCase__ :
def __init__( self : Any , _lowerCamelCase : Optional[Any] , ):
_snake_case = parent
_snake_case = 13
_snake_case = 7
_snake_case = 30
_snake_case = self.seq_length + self.mem_len
_snake_case = 15
_snake_case = True
_snake_case = True
_snake_case = 99
_snake_case = [10, 50, 80]
_snake_case = 32
_snake_case = 32
_snake_case = 4
_snake_case = 8
_snake_case = 128
_snake_case = 2
_snake_case = 2
_snake_case = None
_snake_case = 1
_snake_case = 0
_snake_case = 3
_snake_case = self.vocab_size - 1
_snake_case = 0.0_1
def lowercase ( self : Optional[int] ):
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def lowercase ( self : Any ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def lowercase ( self : Dict , _lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : Dict , _lowerCamelCase : List[str] ):
_snake_case = TFTransfoXLModel(_lowerCamelCase )
_snake_case , _snake_case = model(_lowerCamelCase ).to_tuple()
_snake_case = {'''input_ids''': input_ids_a, '''mems''': mems_a}
_snake_case , _snake_case = model(_lowerCamelCase ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def lowercase ( self : List[Any] , _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Tuple ):
_snake_case = TFTransfoXLLMHeadModel(_lowerCamelCase )
_snake_case , _snake_case = model(_lowerCamelCase ).to_tuple()
_snake_case = {'''input_ids''': input_ids_a, '''labels''': lm_labels}
_snake_case , _snake_case = model(_lowerCamelCase ).to_tuple()
_snake_case , _snake_case = model([input_ids_a, mems_a] ).to_tuple()
_snake_case = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
_snake_case , _snake_case = model(_lowerCamelCase ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def lowercase ( self : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str] ):
_snake_case = TFTransfoXLForSequenceClassification(_lowerCamelCase )
_snake_case = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self : str ):
_snake_case = self.prepare_config_and_inputs()
((_snake_case) , (_snake_case) , (_snake_case) , (_snake_case)) = config_and_inputs
_snake_case = {'''input_ids''': input_ids_a}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( A_ , A_ , unittest.TestCase ):
__a = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
__a = () if is_tf_available() else ()
__a = (
{
"""feature-extraction""": TFTransfoXLModel,
"""text-classification""": TFTransfoXLForSequenceClassification,
"""text-generation""": TFTransfoXLLMHeadModel,
"""zero-shot""": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
__a = False
__a = False
__a = False
__a = False
def lowercase ( self : List[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def lowercase ( self : List[Any] ):
_snake_case = TFTransfoXLModelTester(self )
_snake_case = ConfigTester(self , config_class=_lowerCamelCase , d_embed=37 )
def lowercase ( self : List[str] ):
self.config_tester.run_common_tests()
def lowercase ( self : Union[str, Any] ):
self.model_tester.set_seed()
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*_lowerCamelCase )
def lowercase ( self : str ):
self.model_tester.set_seed()
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*_lowerCamelCase )
def lowercase ( self : str ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*_lowerCamelCase )
def lowercase ( self : str ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_snake_case = model_class(_lowerCamelCase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_snake_case = model.get_output_embeddings()
assert isinstance(_lowerCamelCase , tf.keras.layers.Layer )
_snake_case = model.get_bias()
assert name is None
else:
_snake_case = model.get_output_embeddings()
assert x is None
_snake_case = model.get_bias()
assert name is None
def lowercase ( self : Optional[Any] ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def lowercase ( self : int ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = TFTransfoXLModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def lowercase ( self : int ):
pass
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def lowercase ( self : List[Any] ):
_snake_case = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
_snake_case = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_snake_case = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_snake_case = model.generate(_lowerCamelCase , max_length=200 , do_sample=_lowerCamelCase )
self.assertListEqual(output_ids[0].numpy().tolist() , _lowerCamelCase )
| 40 | 0 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    """Möbius function: 0 if n has a squared prime factor, else (-1)**k for k prime factors."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
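# Worked examples: mobius(4) == 0 (4 = 2*2 is not square-free), mobius(6) == 1
# (6 = 2*3 has an even number of prime factors), mobius(30) == -1 (30 = 2*3*5 has
# an odd number of prime factors).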
if __name__ == "__main__":
import doctest
doctest.testmod()
| 107 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 107 | 1 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__a :int = [
'python',
'tqdm',
'regex',
'requests',
'packaging',
'filelock',
'numpy',
'tokenizers',
'huggingface-hub',
'safetensors',
'accelerate',
'pyyaml',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint) | 359 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
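# The second helper above deliberately round-trips the scheduler through
# state_dict()/load_state_dict() halfway through stepping, so a schedule only
# passes the tests below if its state is fully serializable.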
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Any , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ):
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for a, b in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase )
A_ = torch.tensor([0.4, 0.2, -0.5] )
A_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A_ = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(100 ):
A_ = criterion(UpperCAmelCase , UpperCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
  def test_adafactor(self):
    w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
    target = torch.tensor([0.4, 0.2, -0.5])
    criterion = nn.MSELoss()
    # No warmup, constant schedule, no gradient clipping
    optimizer = Adafactor(
      params=[w], lr=1E-2, eps=(1E-30, 1E-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False, )
    for _ in range(1000):
      loss = criterion(w, target)
      loss.backward()
      optimizer.step()
      w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
      w.grad.zero_()
    self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
  """simple docstring"""
  m = nn.Linear(50, 50) if is_torch_available() else None
  optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
  num_steps = 10
  def assertListAlmostEqual(self, list1, list2, tol, msg=None):
    self.assertEqual(len(list1), len(list2))
    for a, b in zip(list1, list2):
      self.assertAlmostEqual(a, b, delta=tol, msg=msg)
  def test_schedulers(self):
    common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
    # scheduler dict format
    # function: (sched_args_dict, expected_learning_rates)
    scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
    for scheduler_func, data in scheds.items():
      kwargs, expected_learning_rates = data
      scheduler = scheduler_func(self.optimizer, **kwargs)
      self.assertEqual(len([scheduler.get_lr()[0]]), 1)
      lrs_1 = unwrap_schedule(scheduler, self.num_steps)
      self.assertListAlmostEqual(
        lrs_1, expected_learning_rates, tol=1E-2, msg=f'''failed for {scheduler_func} in normal scheduler''', )
      scheduler = scheduler_func(self.optimizer, **kwargs)
      if scheduler_func.__name__ != "get_constant_schedule":
        LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
      lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
      self.assertListEqual(lrs_1, lrs_2, msg=f'''failed for {scheduler_func} in save and reload''')
class LambdaScheduleWrapper:
  """simple docstring"""
  def __init__(self, fn):
    self.fn = fn
  def __call__(self, *args, **kwargs):
    return self.fn(*args, **kwargs)
  @classmethod
  def wrap_scheduler(cls, scheduler):
    scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas)) | 329 | 0 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]
OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
        pytorch_dump_folder_path = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 13 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/longformer-base-4096""": 4096,
"""allenai/longformer-large-4096""": 4096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
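# The mapping above is a bijection over all 256 byte values: printable bytes keep their own
# character and the rest are shifted to unused code points above 255, so no token string
# ever contains a raw space or control character. A small self-check (our example):
#   byte_map = bytes_to_unicode()
#   assert len(byte_map) == 256 and len(set(byte_map.values())) == 256
#   assert byte_map[ord("A")] == "A"  # printable bytes are unchanged
#   assert byte_map[0] == chr(256)    # the NUL byte takes the first free code point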
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
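# Example (our illustration): get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}, the adjacent symbol pairs that serve
# as merge candidates for the next BPE iteration in `bpe` below.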
class LongformerTokenizer(PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs, ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)
    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8"))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index)
    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
return (text, kwargs)
| 13 | 1 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens):
    """simple docstring"""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
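# Illustrative use (our example, requires `datasketch`): MinHash signatures give a cheap
# estimate of the Jaccard similarity between two token *sets*.
#   a = get_min_hash(["def", "foo", "x", "return", "x", "1"] * 2)
#   b = get_min_hash(["def", "bar", "x", "return", "x", "2"] * 2)
#   if a is not None and b is not None:
#       print(a.jaccard(b))  # approximate set similarity in [0, 1]
# Token lists shorter than MIN_NUM_TOKENS (10) return None and are skipped upstream.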
def get_tokens(code):
    """simple docstring"""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *,
        duplication_jaccard_threshold: float = 0.85, ) -> None:
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)
    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(F"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)
    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters
    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
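# Minimal usage sketch (our example): keys are (index, repo_name, path) tuples, the exact
# shape produced by `_compute_min_hash` below.
#   index = DuplicationIndex(duplication_jaccard_threshold=0.85)
#   index.add((0, "org/repo", "a.py"), get_min_hash(tokens_a))
#   index.add((1, "org/repo", "b.py"), get_min_hash(tokens_b))
#   index.save("duplicate_clusters.json")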
def _compute_min_hash(element):
    """simple docstring"""
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator):
    """simple docstring"""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10_000), chunksize=100, ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator, jaccard_threshold: float):
    """simple docstring"""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    """simple docstring"""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
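# Worked example: "def f(a): return a" and "def g(a): return a" tokenize to
# {"def", "f", "a", "return"} and {"def", "g", "a", "return"}; they share 3 of the 5
# distinct tokens, so jaccard_similarity returns 3 / 5 = 0.6.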
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """simple docstring"""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """simple docstring"""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f, cluster_list, ), total=len(cluster_list), ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold: float = 0.85):
    """simple docstring"""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(F"Original dataset size: {len(dataset)}")
    print(F"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(F"Files in duplicate cluster: {len(duplicate_indices)}")
    print(F"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(F"Filtered dataset size: {len(ds_filter)}")
return ds_filter, duplicate_clusters
| 244 |
"""simple docstring"""
import math
def jump_search(arr: list, x: int) -> int:
    """simple docstring"""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
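# With n = 16 sorted elements the block size is sqrt(16) = 4, so at most about 4 block
# probes plus 4 linear probes are needed, i.e. O(sqrt(n)) comparisons. For example:
#   jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610], 55) == 10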
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
    x = int(input('''Enter the number to be searched:\n'''))
    res = jump_search(arr, x)
if res == -1:
print('''Number not found!''')
else:
print(f'''Number {x} is at index {res}''')
| 244 | 1 |
'''simple docstring'''
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir( data_dir, save_dir: str, model_name: str, bs: int = 8, max_source_length: int = 1024, type_path="val", n_obs=None, fp16=False, task="summarization", local_rank=None, num_return_sequences=1, dataset_kwargs: Dict = None, prefix="", **generate_kwargs, ) -> Dict:
    '''simple docstring'''
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)
    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"""rank_{local_rank}_output.json""")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""")  # if this is wrong, check config.model_type.
    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ''''''
    ds = Seq2SeqDataset(
        data_dir, tokenizer, max_source_length, max_target_length=1024, type_path=type_path, n_obs=n_obs, prefix=prefix, **dataset_kwargs, )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device), attention_mask=batch["attention_mask"].to(model.device), num_return_sequences=num_return_sequences, num_beams=num_beams, **generate_kwargs, )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch['''ids''']
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate() -> None:
    '''simple docstring'''
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate")
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.", default="sshleifer/distilbart-xsum-12-3", )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test")
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch")
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all.")
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return")
    parser.add_argument(
        "--sync_timeout", type=int, default=600, required=False, help="How long should master process wait for other processes to finish.", )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples")
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"""parsed the following generate kwargs: {generate_kwargs}""")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"""Found files at {json_save_dir} please move or remove them.""")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang
    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir, json_save_dir, args.model_name, type_path=args.type_path, bs=args.bs, fp16=args.fp16, task=args.task, local_rank=args.local_rank, n_obs=args.n_obs, max_source_length=args.max_source_length, num_return_sequences=args.num_return_sequences, prefix=args.prefix, dataset_kwargs=dataset_kwargs, **generate_kwargs, )
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"""Saving aggregated results at {save_path}, intermediate in {json_save_dir}/""")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]
        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = '''translation''' in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = '''bleu''' if calc_bleu else '''rouge'''
        metrics = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"""{args.type_path}_{metric_name}.json""")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"""{args.type_path}_generations.txt"""))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"""{args.type_path}.target"""))
        else:
            shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    '''simple docstring'''
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x['''pred'''] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    '''simple docstring'''
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 1 |
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
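# Why 6k +/- 1 suffices: every integer is 6k + r with r in {0, 1, 2, 3, 4, 5}, and
# r in {0, 2, 3, 4} makes it divisible by 2 or 3, so only 6k - 1 and 6k + 1 can be prime.
# Hence 5, 7, 11, 13, ... are the only trial divisors needed, e.g.
#   [n for n in range(2, 30) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]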
def solution(ratio: float = 0.1) -> int:
    """simple docstring"""
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
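# The inner range walks the three non-square corners of each new ring of the number
# spiral: growing the side length from j to j + 2 adds the corners j*j + (j + 1),
# j*j + 2*(j + 1) and j*j + 3*(j + 1); the fourth corner (j + 2)**2 is a perfect square
# and never prime, so range(j*j + j + 1, (j + 2) * (j + 2), j + 1) stops just before it.
# Note that `primes += is_prime(i)` relies on bool being a subclass of int.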
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102 | 0 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
SPIECE_UNDERLINE = '▁'
class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self, vocab_file, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100, additional_special_tokens=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, legacy=True, **kwargs, ) -> None:
        '''simple docstring'''
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F'''<extra_id_{i}>''' for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("""extra_id""" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
                    """ tokens""")
        if legacy:
            logger.warning_once(
                F'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
                """ read the related pull request available at https://github.com/huggingface/transformers/pull/24565""")
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, legacy=legacy, **kwargs, )
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        '''simple docstring'''
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    """This tokenizer was incorrectly instantiated with a model max length of"""
                    F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
                    """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
                    """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
                    F''' {pretrained_model_name_or_path} automatically truncating your input to'''
                    F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
                    F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
                    """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
                    """ instantiate this tokenizer with `model_max_length` set to your preferred value.""", FutureWarning, )
        return max_model_length
    @property
    def vocab_size(self):
        '''simple docstring'''
        return self.sp_model.get_piece_size() + self._extra_ids
    def get_vocab(self):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def get_sentinel_tokens(self):
        '''simple docstring'''
        return list(
            set(filter(lambda x: bool(re.search(R"""<extra_id_\d+>""", x)) is not None, self.additional_special_tokens)))
    def get_sentinel_token_ids(self):
        '''simple docstring'''
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]
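    # Sentinel ids count down from the top of the vocabulary: for the stock T5 models
    # (a 32,000-piece sentencepiece model plus the default 100 extra ids, so
    # vocab_size == 32,100) "<extra_id_0>" maps to id 32,099, "<extra_id_1>" to 32,098,
    # and so on. This is the `self.vocab_size - num - 1` arithmetic used in
    # `_convert_token_to_id` further below.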
    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        '''simple docstring'''
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
                """ eos tokens being added.""")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs"""):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        '''simple docstring'''
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, """ """)
        return super().tokenize(text, **kwargs)
    def _tokenize(self, text, **kwargs) -> List[str]:
        '''simple docstring'''
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text, out_type=str)
        if not self.legacy and not is_first and not text.startswith(""" """) and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens
    def _convert_token_to_id(self, token):
        '''simple docstring'''
        if token.startswith("""<extra_id_"""):
            match = re.match(R"""<extra_id_(\d+)>""", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)
    def _convert_id_to_token(self, index):
        '''simple docstring'''
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = F'''<extra_id_{self.vocab_size - 1 - index}>'''
        return token
    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
return (out_vocab_file,) | 232 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/pegasus-xsum': 512,
}
logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self, vocab_file, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None:
        '''simple docstring'''
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    F'''additional_special_tokens should be of type {type(list)}, but is'''
                    F''' {type(additional_special_tokens)}''')
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                F'''<unk_{i}>''' for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    """Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
                    F''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''')
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [F'''<unk_{i}>''' for i in range(2, self.offset)]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, pad_token=pad_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                })
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
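        # Net effect of the layout above: ids 0..self.offset - 1 are reserved for pad/eos,
        # the two mask tokens, and the <unk_2>..<unk_102> pretraining placeholders, so a raw
        # sentencepiece piece id p corresponds to vocabulary id p + self.offset. This is why
        # _convert_token_to_id and _convert_id_to_token below add and subtract self.offset.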
    @property
    def vocab_size(self) -> int:
        '''simple docstring'''
        return len(self.sp_model) + self.offset
    def get_vocab(self) -> Dict[str, int]:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs"""):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token: str) -> int:
        '''simple docstring'''
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset
    def _convert_id_to_token(self, index: int) -> str:
        '''simple docstring'''
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token
    def convert_tokens_to_string(self, tokens) -> str:
        '''simple docstring'''
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def num_special_tokens_to_add(self, pair=False) -> int:
        '''simple docstring'''
        return 1
    def _special_token_mask(self, seq) -> List[int]:
        '''simple docstring'''
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
return (out_vocab_file,) | 232 | 1 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
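# Shape sketch (our example): with pad_token_id = 1, input_ids = [[5, 6, 7, 1, 1]] yields
# attention_mask = [[True, True, True, False, False]] via the `.ne(...)` defaults above,
# and each head mask is an all-ones tensor of shape (num_layers, num_attention_heads).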
class MaMaaaModelTester:
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="relu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, encoder_layerdrop=0.0, decoder_layerdrop=0.0, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def get_config(self):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict['input_ids']
        attention_mask = inputs_dict['attention_mask']
        head_mask = inputs_dict['head_mask']
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)['last_hidden_state']
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            'last_hidden_state'
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
def A_ ( self : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : str ) -> List[str]:
lowerCamelCase__ : Optional[int] = MaMaaaModel(config=UpperCAmelCase ).to(UpperCAmelCase ).eval()
lowerCamelCase__ : Optional[int] = model(**UpperCAmelCase )
lowerCamelCase__ : Dict = outputs.encoder_last_hidden_state
lowerCamelCase__ : int = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : Optional[int] = model.get_encoder()
encoder.save_pretrained(UpperCAmelCase )
lowerCamelCase__ : List[Any] = MaMaaaEncoder.from_pretrained(UpperCAmelCase ).to(UpperCAmelCase )
lowerCamelCase__ : int = encoder(inputs_dict['input_ids'] , attention_mask=inputs_dict['attention_mask'] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : List[Any] = model.get_decoder()
decoder.save_pretrained(UpperCAmelCase )
lowerCamelCase__ : Any = MaMaaaDecoder.from_pretrained(UpperCAmelCase ).to(UpperCAmelCase )
lowerCamelCase__ : Any = decoder(
input_ids=inputs_dict['decoder_input_ids'] , attention_mask=inputs_dict['decoder_attention_mask'] , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=inputs_dict['attention_mask'] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class lowerCAmelCase ( __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
UpperCAmelCase__ = (
{
"""conversational""": MaMaaaForConditionalGeneration,
"""feature-extraction""": MaMaaaModel,
"""summarization""": MaMaaaForConditionalGeneration,
"""text2text-generation""": MaMaaaForConditionalGeneration,
"""translation""": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def A_ ( self : List[str] , UpperCAmelCase : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : List[str] ) -> Any:
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def A_ ( self : Any ) -> int:
lowerCamelCase__ : Union[str, Any] = MaMaaaModelTester(self )
lowerCamelCase__ : Dict = ConfigTester(self , config_class=UpperCAmelCase )
def A_ ( self : int ) -> List[Any]:
self.config_tester.run_common_tests()
def A_ ( self : Optional[int] ) -> Union[str, Any]:
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowerCamelCase__ : Any = model_class(UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase )
lowerCamelCase__ , lowerCamelCase__ : List[Any] = model_class.from_pretrained(UpperCAmelCase , output_loading_info=UpperCAmelCase )
self.assertEqual(info['missing_keys'] , [] )
def A_ ( self : Optional[int] ) -> int:
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCAmelCase )
def A_ ( self : Tuple ) -> List[str]:
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCAmelCase )
def A_ ( self : Optional[int] ) -> List[Any]:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
lowerCamelCase__ : Union[str, Any] = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : List[str] = copy.deepcopy(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
if not self.is_encoder_decoder:
lowerCamelCase__ : Tuple = inputs['input_ids']
del inputs["input_ids"]
else:
lowerCamelCase__ : List[str] = inputs['input_ids']
lowerCamelCase__ : List[Any] = inputs.get('decoder_input_ids' , UpperCAmelCase )
del inputs["input_ids"]
inputs.pop('decoder_input_ids' , UpperCAmelCase )
lowerCamelCase__ : List[str] = model.get_input_embeddings()
if not self.is_encoder_decoder:
lowerCamelCase__ : List[Any] = wte(UpperCAmelCase )
else:
lowerCamelCase__ : int = wte(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = wte(UpperCAmelCase )
with torch.no_grad():
model(**UpperCAmelCase )[0]
def A_ ( self : Dict ) -> List[str]:
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
lowerCamelCase__ : Dict = input_dict['input_ids']
lowerCamelCase__ : List[str] = input_ids.ne(1 ).to(UpperCAmelCase )
lowerCamelCase__ : List[str] = MaMaaaForConditionalGeneration(UpperCAmelCase ).eval().to(UpperCAmelCase )
if torch_device == "cuda":
model.half()
model.generate(UpperCAmelCase , attention_mask=UpperCAmelCase )
model.generate(num_beams=4 , do_sample=UpperCAmelCase , early_stopping=UpperCAmelCase , num_return_sequences=3 )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Optional[Any]:
return torch.tensor(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
_UpperCAmelCase : Union[str, Any] = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class lowerCAmelCase ( unittest.TestCase ):
@cached_property
def A_ ( self : Dict ) -> Optional[int]:
return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' )
def A_ ( self : Any ) -> Optional[Any]:
lowerCamelCase__ : Optional[int] = MaMaaaModel.from_pretrained('facebook/m2m100_418M' ).to(UpperCAmelCase )
lowerCamelCase__ : str = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] )
lowerCamelCase__ : Any = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] )
lowerCamelCase__ : Optional[int] = prepare_mam_aaa_inputs_dict(model.config , UpperCAmelCase , UpperCAmelCase )
with torch.no_grad():
lowerCamelCase__ : Tuple = model(**UpperCAmelCase )[0]
lowerCamelCase__ : Tuple = torch.Size((1, 11, 1024) )
self.assertEqual(output.shape , UpperCAmelCase )
# change to expected output here
lowerCamelCase__ : int = torch.tensor(
[[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=UpperCAmelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
def A_ ( self : List[str] ) -> Dict:
lowerCamelCase__ : List[Any] = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(UpperCAmelCase )
# change to intended input
lowerCamelCase__ : Union[str, Any] = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] )
lowerCamelCase__ : Tuple = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] )
lowerCamelCase__ : Optional[int] = prepare_mam_aaa_inputs_dict(model.config , UpperCAmelCase , UpperCAmelCase )
with torch.no_grad():
lowerCamelCase__ : str = model(**UpperCAmelCase )[0]
lowerCamelCase__ : Dict = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase )
# change to expected output here
lowerCamelCase__ : Optional[int] = torch.tensor(
[[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=UpperCAmelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
def A_ ( self : int ) -> str:
lowerCamelCase__ : List[str] = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(UpperCAmelCase )
lowerCamelCase__ : str = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' , src_lang='fr' , tgt_lang='en' )
lowerCamelCase__ : Dict = [
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'
' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'
' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.',
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
lowerCamelCase__ : List[str] = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors='pt' )
lowerCamelCase__ : Tuple = model.generate(
input_ids=dct['input_ids'].to(UpperCAmelCase ) , attention_mask=dct['attention_mask'].to(UpperCAmelCase ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('en' ) , )
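# `forced_bos_token_id` pins the first generated token to the target-language id;
# this is how M2M100 selects the translation direction (here fr -> en).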
lowerCamelCase__ : str = [
'The NSA case highlights the total absence of intelligence debate',
'I think there are two levels of response from the French government.',
'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'
' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'
' communications in France.',
]
lowerCamelCase__ : Tuple = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
assert generated == expected_en
| 50 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
lowerCAmelCase = """true"""
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : int=8_2 , snake_case_ : Optional[Any]=1_6 ) ->Dict:
set_seed(4_2 )
lowerCamelCase__ : List[Any] =RegressionModel()
lowerCamelCase__ : List[Any] =deepcopy(snake_case_ )
lowerCamelCase__ : List[str] =RegressionDataset(length=snake_case_ )
lowerCamelCase__ : Any =DataLoader(snake_case_ , batch_size=snake_case_ )
model.to(accelerator.device )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =accelerator.prepare(snake_case_ , snake_case_ )
return model, ddp_model, dataloader
def lowerCAmelCase_ ( snake_case_ : Accelerator , snake_case_ : str=False ) ->List[str]:
lowerCamelCase__ : int =AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
lowerCamelCase__ : List[Any] =load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(snake_case_ : Optional[Any] ):
lowerCamelCase__ : Optional[int] =tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case_ , max_length=snake_case_ )
return outputs
with accelerator.main_process_first():
lowerCamelCase__ : Tuple =dataset.map(
snake_case_ , batched=snake_case_ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
lowerCamelCase__ : List[Any] =tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(snake_case_ : Union[str, Any] ):
if use_longest:
return tokenizer.pad(snake_case_ , padding='longest' , return_tensors='pt' )
return tokenizer.pad(snake_case_ , padding='max_length' , max_length=1_2_8 , return_tensors='pt' )
return DataLoader(snake_case_ , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=1_6 )
def lowerCAmelCase_ ( snake_case_ : List[str] , snake_case_ : Tuple ) ->Any:
lowerCamelCase__ : Optional[int] =Accelerator(dispatch_batches=snake_case_ , split_batches=snake_case_ )
lowerCamelCase__ : List[Any] =get_dataloader(snake_case_ , not dispatch_batches )
lowerCamelCase__ : Union[str, Any] =AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=snake_case_ )
lowerCamelCase__ , lowerCamelCase__ : Dict =accelerator.prepare(snake_case_ , snake_case_ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : List[str] ) ->Dict:
lowerCamelCase__ : Optional[Any] =[]
for batch in dataloader:
lowerCamelCase__ , lowerCamelCase__ : int =batch.values()
with torch.no_grad():
lowerCamelCase__ : Optional[Any] =model(snake_case_ )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =[], []
for logit, targ in logits_and_targets:
logits.append(snake_case_ )
targs.append(snake_case_ )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =torch.cat(snake_case_ ), torch.cat(snake_case_ )
return logits, targs
def lowerCAmelCase_ ( snake_case_ : Accelerator , snake_case_ : Optional[int]=8_2 , snake_case_ : Any=False , snake_case_ : List[Any]=False , snake_case_ : Optional[int]=1_6 ) ->List[str]:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =get_basic_setup(snake_case_ , snake_case_ , snake_case_ )
lowerCamelCase__ , lowerCamelCase__ : Any =generate_predictions(snake_case_ , snake_case_ , snake_case_ )
assert (
len(snake_case_ ) == num_samples
), f"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(snake_case_ )}"""
def lowerCAmelCase_ ( snake_case_ : bool = False , snake_case_ : bool = False ) ->str:
lowerCamelCase__ : Dict =evaluate.load('glue' , 'mrpc' )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =get_mrpc_setup(snake_case_ , snake_case_ )
# First do baseline
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =setup['no']
model.to(snake_case_ )
model.eval()
for batch in dataloader:
batch.to(snake_case_ )
with torch.inference_mode():
lowerCamelCase__ : Any =model(**snake_case_ )
lowerCamelCase__ : List[str] =outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=snake_case_ , references=batch['labels'] )
lowerCamelCase__ : Optional[Any] =metric.compute()
# Then do distributed
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] =setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
lowerCamelCase__ : List[Any] =model(**snake_case_ )
lowerCamelCase__ : str =outputs.logits.argmax(dim=-1 )
lowerCamelCase__ : int =batch['labels']
lowerCamelCase__ , lowerCamelCase__ : List[Any] =accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=snake_case_ , references=snake_case_ )
lowerCamelCase__ : List[str] =metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def lowerCAmelCase_ ( ) ->str:
lowerCamelCase__ : List[str] =Accelerator(split_batches=snake_case_ , dispatch_batches=snake_case_ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(snake_case_ , snake_case_ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
lowerCamelCase__ : Dict =Accelerator(split_batches=snake_case_ , dispatch_batches=snake_case_ )
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(snake_case_ , 9_9 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
lowerCamelCase__ : List[Any] =Accelerator()
test_torch_metrics(snake_case_ , 5_1_2 )
accelerator.state._reset_state()
def lowerCAmelCase_ ( snake_case_ : List[Any] ) ->Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 126 | 0 |
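# A minimal single-process sketch of the `gather_for_metrics` behavior the tests
# above exercise: unlike plain `gather`, it drops the samples Accelerate
# duplicates to pad an uneven final batch, so gathered counts match the true
# dataset length. Standalone illustration, not part of the script above.
from accelerate import Accelerator
import torch

accelerator = Accelerator()
logits = torch.randn(10, 2).to(accelerator.device)
gathered = accelerator.gather_for_metrics(logits)
assert gathered.shape[0] == 10  # on a single process, the gather is a no-op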
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_UpperCAmelCase : Any = logging.getLogger(__name__)
_UpperCAmelCase : Tuple = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_UpperCAmelCase : Dict = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowercase :
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(_SCREAMING_SNAKE_CASE )} , )
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class lowercase :
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "The input training data file (a text file)."} )
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
__lowercase : Optional[str] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
__lowercase : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
__lowercase : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
__lowercase : bool = field(default=_SCREAMING_SNAKE_CASE , metadata={"help": "Whether ot not to use whole word mask."} )
__lowercase : float = field(
default=0.1_5 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
__lowercase : float = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
__lowercase : int = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
__lowercase : int = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
__lowercase : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def A ( lowercase , lowercase , lowercase = False , lowercase = None , ) -> Optional[int]:
'''simple docstring'''
def _dataset(lowercase , lowercase=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('You need to set whole word masking and mlm to True for Chinese Whole Word Mask' )
return LineByLineWithRefDataset(
tokenizer=lowercase , file_path=lowercase , block_size=args.block_size , ref_path=lowercase , )
return LineByLineTextDataset(tokenizer=lowercase , file_path=lowercase , block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowercase , file_path=lowercase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowercase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(lowercase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def A ( ) -> str:
'''simple docstring'''
UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
'or remove the --do_eval argument.' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , lowercase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
UpperCamelCase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCamelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
UpperCamelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.tokenizer_name:
UpperCamelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCamelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
' script, save it, and load it from here, using --tokenizer_name' )
if model_args.model_name_or_path:
UpperCamelCase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , )
else:
logger.info('Training new model from scratch' )
UpperCamelCase = AutoModelWithLMHead.from_config(lowercase )
model.resize_token_embeddings(len(lowercase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the '
'--mlm flag (masked language modeling).' )
if data_args.block_size <= 0:
UpperCamelCase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
UpperCamelCase = min(data_args.block_size , tokenizer.max_len )
# Get datasets
UpperCamelCase = (
get_dataset(lowercase , tokenizer=lowercase , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
UpperCamelCase = (
get_dataset(lowercase , tokenizer=lowercase , evaluate=lowercase , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
UpperCamelCase = DataCollatorForPermutationLanguageModeling(
tokenizer=lowercase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
UpperCamelCase = DataCollatorForWholeWordMask(
tokenizer=lowercase , mlm_probability=data_args.mlm_probability )
else:
UpperCamelCase = DataCollatorForLanguageModeling(
tokenizer=lowercase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
UpperCamelCase = Trainer(
model=lowercase , args=lowercase , data_collator=lowercase , train_dataset=lowercase , eval_dataset=lowercase , prediction_loss_only=lowercase , )
# Training
if training_args.do_train:
UpperCamelCase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowercase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCamelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
UpperCamelCase = trainer.evaluate()
UpperCamelCase = math.exp(eval_output['eval_loss'] )
UpperCamelCase = {'perplexity': perplexity}
UpperCamelCase = os.path.join(training_args.output_dir , 'eval_results_lm.txt' )
if trainer.is_world_master():
with open(lowercase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , lowercase , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
results.update(lowercase )
return results
def A ( lowercase ) -> Tuple:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 110 |
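# A minimal sketch of the masked-LM collator path the script above selects when
# `mlm=True` and whole-word masking is off; the model name is illustrative, and
# 0.15 mirrors the script's default mlm_probability.
from transformers import AutoTokenizer, DataCollatorForLanguageModeling

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)
batch = collator([tokenizer("hello world"), tokenizer("masked language modeling")])
# batch["labels"] is -100 everywhere except the ~15% of positions chosen for masking.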
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch for the window starting
        at `current_pos`, or -1 if the pattern matches there."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
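# Quick check of the heuristic above: "AB" begins at indices 0 and 3 in "ABAABA".
assert positions == [0, 3]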
| 110 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return every prime up to and including `num`, via the Sieve of Eratosthenes."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start to False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
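# Example: prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]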
| 294 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_snake_case = logging.get_logger(__name__)
_snake_case = {'vocab_file': 'spiece.model'}
_snake_case = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class UpperCamelCase ( snake_case_ ):
def __init__( self : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Dict="<s>" , UpperCAmelCase__ : Any="</s>" , UpperCAmelCase__ : Any="<unk>" , UpperCAmelCase__ : int="<sep>" , UpperCAmelCase__ : Tuple="<pad>" , UpperCAmelCase__ : Any="<cls>" , UpperCAmelCase__ : Optional[Any]="<mask>" , UpperCAmelCase__ : int=["<eop>", "<eod>"] , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : List[str] , ) -> None:
_a : Optional[int] = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token
_a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase__ , remove_space=UpperCAmelCase__ , keep_accents=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
_a : Optional[Any] = 3
_a : Tuple = do_lower_case
_a : Tuple = remove_space
_a : Tuple = keep_accents
_a : Tuple = vocab_file
_a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase__ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
_a : int = jieba
_a : Tuple = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _lowercase ( self : Optional[Any] ) -> Any:
return len(self.sp_model )
def _lowercase ( self : str ) -> Union[str, Any]:
_a : int = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ) -> List[str]:
_a : Tuple = self.__dict__.copy()
_a : Tuple = None
return state
def __setstate__( self : Any , UpperCAmelCase__ : Dict ) -> Dict:
_a : Tuple = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_a : Tuple = {}
_a : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowercase ( self : List[str] , UpperCAmelCase__ : Union[str, Any] ) -> Dict:
if self.remove_space:
_a : Optional[int] = """ """.join(inputs.strip().split() )
else:
_a : List[Any] = inputs
_a : int = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_a : Optional[Any] = unicodedata.normalize("""NFKD""" , UpperCAmelCase__ )
_a : Dict = """""".join([c for c in outputs if not unicodedata.combining(UpperCAmelCase__ )] )
if self.do_lower_case:
_a : Union[str, Any] = outputs.lower()
return outputs
def _lowercase ( self : List[str] , UpperCAmelCase__ : str ) -> List[str]:
_a : str = self.preprocess_text(UpperCAmelCase__ )
_a : Dict = self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
_a : Union[str, Any] = []
for piece in pieces:
if len(UpperCAmelCase__ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_a : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase__ , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_a : Dict = cur_pieces[1:]
else:
_a : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCAmelCase__ )
else:
new_pieces.append(UpperCAmelCase__ )
return new_pieces
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : int ) -> int:
return self.sp_model.PieceToId(UpperCAmelCase__ )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Optional[Any] ) -> Any:
return self.sp_model.IdToPiece(UpperCAmelCase__ )
def _lowercase ( self : Any , UpperCAmelCase__ : Any ) -> Dict:
_a : Dict = """""".join(UpperCAmelCase__ ).replace(UpperCAmelCase__ , """ """ ).strip()
return out_string
def _lowercase ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
_a : Optional[Any] = [self.sep_token_id]
_a : Dict = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowercase ( self : Tuple , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is not None:
return ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1]
return ([0] * len(UpperCAmelCase__ )) + [1, 1]
def _lowercase ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
_a : Any = [self.sep_token_id]
_a : Optional[Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : Union[str, Any] = os.path.join(
UpperCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ , """wb""" ) as fi:
_a : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
def _lowercase ( self : Any , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : List[str] ) -> List[str]:
_a : Tuple = super()._decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
_a : Optional[Any] = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
| 294 | 1 |
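# A self-contained illustration of the whitespace remapping the CPM tokenizer
# above relies on: spaces and newlines become placeholder glyphs before
# SentencePiece, and `_decode` maps them back.
table = str.maketrans(" \n", "\u2582\u2583")
encoded = "no pain no gain\n".translate(table)
decoded = encoded.replace("\u2582", " ").replace("\u2583", "\n")
assert decoded == "no pain no gain\n"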
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None
class CircularLinkedList:
    def __init__(self) -> None:
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError('list index out of range.')
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError('list index out of range.')
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    """Exercise the CircularLinkedList operations end to end."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3
    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 | """simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__lowercase = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 85 | 1 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class __lowerCAmelCase ( __A ):
"""simple docstring"""
snake_case_ = '''time_series_transformer'''
snake_case_ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "student_t" , lowerCamelCase__ = "nll" , lowerCamelCase__ = 1 , lowerCamelCase__ = [1, 2, 3, 4, 5, 6, 7] , lowerCamelCase__ = "mean" , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 32 , lowerCamelCase__ = 32 , lowerCamelCase__ = 2 , lowerCamelCase__ = 2 , lowerCamelCase__ = 2 , lowerCamelCase__ = 2 , lowerCamelCase__ = True , lowerCamelCase__ = "gelu" , lowerCamelCase__ = 64 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 100 , lowerCamelCase__ = 0.02 , lowerCamelCase__=True , **lowerCamelCase__ , ) -> str:
'''simple docstring'''
__lowerCamelCase = prediction_length
__lowerCamelCase = context_length or prediction_length
__lowerCamelCase = distribution_output
__lowerCamelCase = loss
__lowerCamelCase = input_size
__lowerCamelCase = num_time_features
__lowerCamelCase = lags_sequence
__lowerCamelCase = scaling
__lowerCamelCase = num_dynamic_real_features
__lowerCamelCase = num_static_real_features
__lowerCamelCase = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(lowerCamelCase__ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
__lowerCamelCase = cardinality
else:
__lowerCamelCase = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(lowerCamelCase__ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
__lowerCamelCase = embedding_dimension
else:
__lowerCamelCase = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__lowerCamelCase = num_parallel_samples
# Transformer architecture configuration
__lowerCamelCase = input_size * len(lowerCamelCase__ ) + self._number_of_features
__lowerCamelCase = d_model
__lowerCamelCase = encoder_attention_heads
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = encoder_ffn_dim
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = encoder_layers
__lowerCamelCase = decoder_layers
__lowerCamelCase = dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = encoder_layerdrop
__lowerCamelCase = decoder_layerdrop
__lowerCamelCase = activation_function
__lowerCamelCase = init_std
__lowerCamelCase = use_cache
super().__init__(is_encoder_decoder=lowerCamelCase__ , **lowerCamelCase__ )
@property
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
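# Worked example with hypothetical values: embedding_dimension=[2, 3],
# num_dynamic_real_features=1, num_time_features=4, num_static_real_features=0
# and input_size=1 give (2 + 3) + 1 + 4 + 0 + 1 * 2 = 12 extra features.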
| 90 | import logging
from transformers import PretrainedConfig
_UpperCAmelCase = logging.getLogger(__name__)
_UpperCAmelCase = {
"""bertabs-finetuned-cnndm""": """https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json""",
}
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''bertabs'''
def __init__( self , lowercase=3_0_5_2_2 , lowercase=5_1_2 , lowercase=6 , lowercase=5_1_2 , lowercase=8 , lowercase=5_1_2 , lowercase=0.2 , lowercase=6 , lowercase=7_6_8 , lowercase=8 , lowercase=2_0_4_8 , lowercase=0.2 , **lowercase , ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Optional[int] = vocab_size
A_ : Union[str, Any] = max_pos
A_ : List[str] = enc_layers
A_ : Tuple = enc_hidden_size
A_ : List[Any] = enc_heads
A_ : str = enc_ff_size
A_ : Optional[Any] = enc_dropout
A_ : Dict = dec_layers
A_ : Optional[Any] = dec_hidden_size
A_ : int = dec_heads
A_ : Any = dec_ff_size
A_ : List[str] = dec_dropout
| 140 | 0 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling uniform points in the square [-1, 1] x [-1, 1]."""

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"""The estimated value of pi is {pi_estimate}""")
    print(f"""The numpy value of pi is {pi}""")
    print(f"""The total error is {abs(pi - pi_estimate)}""")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of `function_to_integrate` over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print('******************')
    print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""")
    print(f"""Estimated value is {estimated_value}""")
    print(f"""Expected value is {expected_value}""")
    print(f"""Total error is {abs(estimated_value - expected_value)}""")
    print('******************')


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0)
    print('******************')
    print('Estimating pi using area_under_curve_estimator')
    print(f"""Estimated value is {estimated_value}""")
    print(f"""Expected value is {pi}""")
    print(f"""Total error is {abs(estimated_value - pi)}""")
    print('******************')
if __name__ == "__main__":
import doctest
doctest.testmod()
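# Example: area_under_curve_estimator(100_000, lambda x: x * x) converges to 1/3,
# since the integral of x**2 over [0, 1] is 1/3.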
| 260 |
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort sequence[start..end] in place with the (deliberately inefficient) slowsort algorithm."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
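# Example usage: data = [9, 3, 7, 1]; slowsort(data); data is now [1, 3, 7, 9].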
| 260 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = DiTPipeline
UpperCAmelCase__ : int = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
UpperCAmelCase__ : Any = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
UpperCAmelCase__ : int = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
UpperCAmelCase__ : List[Any] = False
def _a ( self ) -> Optional[Any]:
torch.manual_seed(0 )
__UpperCamelCase =TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=A_ , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=A_ , )
__UpperCamelCase =AutoencoderKL()
__UpperCamelCase =DDIMScheduler()
__UpperCamelCase ={'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def _a ( self , A_ , A_=0 ) -> Union[str, Any]:
if str(A_ ).startswith('mps' ):
__UpperCamelCase =torch.manual_seed(A_ )
else:
__UpperCamelCase =torch.Generator(device=A_ ).manual_seed(A_ )
__UpperCamelCase ={
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def _a ( self ) -> Any:
__UpperCamelCase ='cpu'
__UpperCamelCase =self.get_dummy_components()
__UpperCamelCase =self.pipeline_class(**A_ )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase =self.get_dummy_inputs(A_ )
__UpperCamelCase =pipe(**A_ ).images
__UpperCamelCase =image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__UpperCamelCase =np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
__UpperCamelCase =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A_ , 1E-3 )
def _a ( self ) -> Tuple:
self._test_inference_batch_single_identical(relax_max_difference=A_ , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _a ( self ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> List[str]:
__UpperCamelCase =torch.manual_seed(0 )
__UpperCamelCase =DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
__UpperCamelCase =['vase', 'umbrella', 'white shark', 'white wolf']
__UpperCamelCase =pipe.get_label_ids(A_ )
__UpperCamelCase =pipe(A_ , generator=A_ , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(A_ , A_ ):
__UpperCamelCase =load_numpy(
f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' )
assert np.abs((expected_image - image).max() ) < 1E-2
def _a ( self ) -> Any:
__UpperCamelCase =DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
__UpperCamelCase =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
__UpperCamelCase =['vase', 'umbrella']
__UpperCamelCase =pipe.get_label_ids(A_ )
__UpperCamelCase =torch.manual_seed(0 )
__UpperCamelCase =pipe(A_ , generator=A_ , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(A_ , A_ ):
__UpperCamelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f'/dit/{word}_512.npy' )
assert np.abs((expected_image - image).max() ) < 1E-1
| 62 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
with open(snake_case__ ) as metadata_file:
SCREAMING_SNAKE_CASE__ = json.load(snake_case__ )
SCREAMING_SNAKE_CASE__ = LukeConfig(use_entity_aware_attention=snake_case__ , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE__ = torch.load(snake_case__ , map_location="""cpu""" )
# Load the entity vocab file
SCREAMING_SNAKE_CASE__ = load_entity_vocab(snake_case__ )
SCREAMING_SNAKE_CASE__ = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE__ = AddedToken("""<ent>""" , lstrip=snake_case__ , rstrip=snake_case__ )
SCREAMING_SNAKE_CASE__ = AddedToken("""<ent2>""" , lstrip=snake_case__ , rstrip=snake_case__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(snake_case__ )
with open(os.path.join(snake_case__ , LukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE__ = LukeTokenizer.from_pretrained(snake_case__ )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE__ = state_dict["""embeddings.word_embeddings.weight"""]
SCREAMING_SNAKE_CASE__ = word_emb[tokenizer.convert_tokens_to_ids(["""@"""] )[0]].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ = word_emb[tokenizer.convert_tokens_to_ids(["""#"""] )[0]].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE__ = f"""encoder.layer.{layer_index}.attention.self."""
SCREAMING_SNAKE_CASE__ = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE__ = state_dict["""entity_embeddings.entity_embeddings.weight"""]
SCREAMING_SNAKE_CASE__ = entity_emb[entity_vocab["""[MASK]"""]]
SCREAMING_SNAKE_CASE__ = LukeModel(config=snake_case__ ).eval()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model.load_state_dict(snake_case__ , strict=snake_case__ )
if not (len(snake_case__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f"""Missing keys {", ".join(snake_case__ )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )):
raise ValueError(
"""Unexpected keys"""
f""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )
# Check outputs
SCREAMING_SNAKE_CASE__ = LukeTokenizer.from_pretrained(snake_case__ , task="""entity_classification""" )
SCREAMING_SNAKE_CASE__ = (
"""Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
""" new world number one avoid a humiliating second- round exit at Wimbledon ."""
)
SCREAMING_SNAKE_CASE__ = (39, 42)
SCREAMING_SNAKE_CASE__ = tokenizer(snake_case__ , entity_spans=[span] , add_prefix_space=snake_case__ , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ = model(**snake_case__ )
# Verify word hidden states
if model_size == "large":
SCREAMING_SNAKE_CASE__ = torch.Size((1, 42, 10_24) )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[0.01_33, 0.08_65, 0.00_95], [0.30_93, -0.25_76, -0.74_18], [-0.17_20, -0.21_17, -0.28_69]] )
else: # base
SCREAMING_SNAKE_CASE__ = torch.Size((1, 42, 7_68) )
SCREAMING_SNAKE_CASE__ = torch.tensor([[0.00_37, 0.13_68, -0.00_91], [0.10_99, 0.33_29, -0.10_95], [0.07_65, 0.53_35, 0.11_79]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case__ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
SCREAMING_SNAKE_CASE__ = torch.Size((1, 1, 10_24) )
SCREAMING_SNAKE_CASE__ = torch.tensor([[0.04_66, -0.01_06, -0.01_79]] )
else: # base
SCREAMING_SNAKE_CASE__ = torch.Size((1, 1, 7_68) )
SCREAMING_SNAKE_CASE__ = torch.tensor([[0.14_57, 0.10_44, 0.01_74]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , snake_case__ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(snake_case__ ) )
model.save_pretrained(snake_case__ )
def load_entity_vocab( entity_vocab_path ):
'''simple docstring'''
entity_vocab = {}
with open(entity_vocab_path , """r""" , encoding="""utf-8""" ) as f:
for index, line in enumerate(f ):
title , _ = line.rstrip().split("""\t""" )
entity_vocab[title] = index
return entity_vocab
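# The entity vocabulary consumed by load_entity_vocab above is a plain TSV
# file with one entity per line; only the first tab-separated field (the
# title) is kept, and the line index becomes the entity id. A minimal sketch
# of a compatible file (hypothetical titles, for illustration only):
#
#   [PAD]\t12345
#   [MASK]\t67890      (the second field is ignored; ids come from line order)
#   Ana Ivanovic\t42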
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 165 | 0 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class A ( BertTokenizerFast ):
'''simple docstring'''
slow_tokenizer_class = CustomTokenizer
pass
| 146 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
A : List[Any] = logging.get_logger(__name__)
class A ( CLIPImageProcessor ):
'''simple docstring'''
def __init__(self : List[Any] , *_UpperCAmelCase : int , **_UpperCAmelCase : List[str] ) -> None:
"""simple docstring"""
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""" , FutureWarning , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 146 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_roc_bert"] = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 252 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = '''Create a default config file for Accelerate with only a few flags set.'''
def __lowerCamelCase ( UpperCAmelCase_ : Optional[Any]="no" , UpperCAmelCase_ : str = default_json_config_file , UpperCAmelCase_ : bool = False ):
"""simple docstring"""
a :List[str] = Path(UpperCAmelCase_ )
path.parent.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
if path.exists():
print(
F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
return False
a :Optional[Any] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
a :List[Any] = {
'''compute_environment''': '''LOCAL_MACHINE''',
'''mixed_precision''': mixed_precision,
}
if torch.cuda.is_available():
a :Dict = torch.cuda.device_count()
a :Tuple = num_gpus
a :int = False
if num_gpus > 1:
a :str = '''MULTI_GPU'''
else:
a :List[Any] = '''NO'''
elif is_xpu_available() and use_xpu:
a :List[Any] = torch.xpu.device_count()
a :Optional[int] = num_xpus
a :List[Any] = False
if num_xpus > 1:
a :int = '''MULTI_XPU'''
else:
a :str = '''NO'''
elif is_npu_available():
a :List[str] = torch.npu.device_count()
a :Any = num_npus
a :Optional[int] = False
if num_npus > 1:
a :List[str] = '''MULTI_NPU'''
else:
a :Dict = '''NO'''
else:
a :str = 0
a :Optional[Any] = True
a :Optional[Any] = 1
a :str = '''NO'''
a :List[str] = ClusterConfig(**UpperCAmelCase_ )
config.to_json_file(UpperCAmelCase_ )
return path
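# For reference, on a single-GPU CUDA machine the call above would produce a
# JSON config roughly like the following (a sketch, not verbatim output):
#
#   {
#       "compute_environment": "LOCAL_MACHINE",
#       "mixed_precision": "no",
#       "num_processes": 1,
#       "use_cpu": false,
#       "distributed_type": "NO"
#   }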
def default_command_parser( parser , parents ):
    """Register the `accelerate config default` subcommand on the given parser."""
    parser = parser.add_parser('''default''' , parents=parents , help=description , formatter_class=SubcommandHelpFormatter )
    parser.add_argument(
        '''--config_file''' , default=default_json_config_file , help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) , dest='''save_location''' , )
    parser.add_argument(
        '''--mixed_precision''' , choices=['''no''', '''fp16''', '''bf16'''] , type=str , help='''Whether or not to use mixed precision training. '''
        '''Choose between FP16 and BF16 (bfloat16) training. '''
        '''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' , default='''no''' , )
    parser.set_defaults(func=default_config_command )
    return parser
def default_config_command( args ):
    """Entry point for the `default` subcommand: write the config and report where."""
    config_file = write_basic_config(args.mixed_precision , args.save_location )
    if config_file:
        print(F'''accelerate configuration saved at {config_file}''' )
| 94 | 0 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    '''Checks whether `cp` is the codepoint of a CJK character.'''
    if (
        (cp >= 0x4e00 and cp <= 0x9fff)  # CJK Unified Ideographs
        or (cp >= 0x3400 and cp <= 0x4dbf)  # CJK Unified Ideographs Extension A
        or (cp >= 0x20000 and cp <= 0x2a6df)  # CJK Unified Ideographs Extension B
        or (cp >= 0x2a700 and cp <= 0x2b73f)  # CJK Unified Ideographs Extension C
        or (cp >= 0x2b740 and cp <= 0x2b81f)  # CJK Unified Ideographs Extension D
        or (cp >= 0x2b820 and cp <= 0x2ceaf)  # CJK Unified Ideographs Extension E
        or (cp >= 0xf900 and cp <= 0xfaff)  # CJK Compatibility Ideographs
        or (cp >= 0x2f800 and cp <= 0x2fa1f)  # CJK Compatibility Ideographs Supplement
    ):
        return True
    return False
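# Quick, runnable sanity check for the codepoint ranges above (illustrative):
def _demo_is_chinese_char():
    assert _is_chinese_char(ord("中")) is True  # CJK Unified Ideographs
    assert _is_chinese_char(ord("A")) is False  # ASCII is not CJK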
def is_chinese(word):
    '''Returns 1 if every character in `word` is a CJK character, else 0.'''
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens):
    '''Collects the multi-character, fully-Chinese words from a token list.'''
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    '''Marks the non-initial characters of known Chinese words with "##".'''
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            # try the longest match first and shrink the window down to 2 chars
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
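# Worked example for add_sub_symbol (illustrative): if the segmenter found the
# whole word "北京" while BERT tokenized it as two single characters, the
# second character is re-marked as a subword so whole-word masking can treat
# "北京" as one unit:
def _demo_add_sub_symbol():
    marked = add_sub_symbol(["北", "京", "欢", "迎"], {"北京"})
    assert marked == ["北", "##京", "欢", "迎"]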
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    '''Builds whole-word-masking reference positions for each input line.'''
    ltp_res = []
    for i in range(0, len(lines), 1_00):
        res = ltp_tokenizer.seg(lines[i : i + 1_00])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 1_00):
        res = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=True, truncation=True, max_length=5_12)
        bert_res.extend(res["""input_ids"""])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese subwords that start with ##,
        # which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save the Chinese token's position
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    '''Reads the corpus, computes reference ids, and writes them as JSON lines.'''
    with open(args.file_name, """r""", encoding="""utf-8""") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, """w""", encoding="""utf-8""") as f:
        data = [json.dumps(ref) + """\n""" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
UpperCAmelCase : List[Any] = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
UpperCAmelCase : int = parser.parse_args()
main(args)
| 313 |
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
UpperCAmelCase : Optional[int] = logging.getLogger(__name__)
class BertEncoderWithPabee( BertEncoder ):
def adaptive_forward( self : Optional[Any] , hidden_states : Any , current_layer : int , attention_mask : List[str]=None , head_mask : Optional[Any]=None):
"""Run a single encoder layer and return its hidden states."""
layer_outputs = self.layer[current_layer](hidden_states , attention_mask , head_mask[current_layer])
hidden_states = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , __UpperCAmelCase , )
class BertModelWithPabee( BertModel ):
def __init__( self : Any , config : Dict):
"""simple docstring"""
super().__init__(config)
self.encoder = BertEncoderWithPabee(config)
self.init_weights()
self.patience = 0
self.inference_instances_num = 0
self.inference_layers_num = 0
self.regression_threshold = 0
def set_regression_threshold( self : Dict , threshold : int):
"""simple docstring"""
self.regression_threshold = threshold
def set_patience( self : str , patience : Optional[Any]):
"""simple docstring"""
self.patience = patience
def reset_stats( self : Dict):
"""simple docstring"""
self.inference_instances_num = 0
self.inference_layers_num = 0
def log_stats( self : Optional[int]):
"""simple docstring"""
avg_inf_layers = self.inference_layers_num / self.inference_instances_num
message = (
F'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
F''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(message)
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def forward( self : Union[str, Any] , input_ids : List[str]=None , attention_mask : Optional[Any]=None , token_type_ids : Dict=None , position_ids : Dict=None , head_mask : str=None , inputs_embeds : int=None , encoder_hidden_states : Optional[int]=None , encoder_attention_mask : List[str]=None , output_dropout : int=None , output_layers : List[str]=None , regression : List[Any]=False , ):
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""")
elif input_ids is not None:
lowercase_ = input_ids.size()
elif inputs_embeds is not None:
lowercase_ = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""")
lowercase_ = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowercase_ = torch.ones(lowerCAmelCase_ , device=lowerCAmelCase_)
if token_type_ids is None:
lowercase_ = torch.zeros(lowerCAmelCase_ , dtype=torch.long , device=lowerCAmelCase_)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowercase_ = self.get_extended_attention_mask(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
# If a 2D or 3D attention mask is provided for the cross-attention,
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
lowercase_ , lowercase_ , lowercase_ = encoder_hidden_states.size()
lowercase_ = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
lowercase_ = torch.ones(lowerCAmelCase_ , device=lowerCAmelCase_)
lowercase_ = self.invert_attention_mask(lowerCAmelCase_)
else:
lowercase_ = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowercase_ = self.get_head_mask(lowerCAmelCase_ , self.config.num_hidden_layers)
lowercase_ = self.embeddings(
input_ids=lowerCAmelCase_ , position_ids=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , inputs_embeds=lowerCAmelCase_)
lowercase_ = embedding_output
if self.training:
lowercase_ = []
for i in range(self.config.num_hidden_layers):
lowercase_ = self.encoder.adaptive_forward(
lowerCAmelCase_ , current_layer=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_)
lowercase_ = self.pooler(lowerCAmelCase_)
lowercase_ = output_layers[i](output_dropout(lowerCAmelCase_))
res.append(lowerCAmelCase_)
elif self.patience == 0: # Use all layers for inference
lowercase_ = self.encoder(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )
lowercase_ = self.pooler(encoder_outputs[0])
lowercase_ = [output_layers[self.config.num_hidden_layers - 1](lowerCAmelCase_)]
else:
lowercase_ = 0
lowercase_ = None
lowercase_ = 0
for i in range(self.config.num_hidden_layers):
calculated_layer_num += 1
lowercase_ = self.encoder.adaptive_forward(
lowerCAmelCase_ , current_layer=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_)
lowercase_ = self.pooler(lowerCAmelCase_)
lowercase_ = output_layers[i](lowerCAmelCase_)
if regression:
lowercase_ = logits.detach()
if patient_result is not None:
lowercase_ = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels) < self.regression_threshold:
patient_counter += 1
else:
lowercase_ = 0
else:
lowercase_ = logits.detach().argmax(dim=1)
if patient_result is not None:
lowercase_ = patient_result.detach().argmax(dim=1)
if (patient_result is not None) and torch.all(labels.eq(lowerCAmelCase_)):
patient_counter += 1
else:
lowercase_ = 0
lowercase_ = logits
if patient_counter == self.patience:
break
lowercase_ = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
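# How the patience-based exit above works, in brief: at inference time each
# layer's pooled output goes through its own classifier head, and once
# `patience` consecutive layers agree on the prediction (or, for regression,
# differ by less than `regression_threshold`), the loop breaks and the
# remaining layers are skipped. A minimal standalone sketch of the same idea
# (hypothetical `layer_logits` list of per-layer score lists, illustration only):
def _demo_patience_exit(layer_logits, patience):
    counter, previous = 0, None
    for logits in layer_logits:
        prediction = max(range(len(logits)), key=logits.__getitem__)
        counter = counter + 1 if prediction == previous else 0
        previous = prediction
        if counter == patience:
            break  # early exit: later layers are never evaluated
    return previous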
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , __UpperCAmelCase , )
class BertForSequenceClassificationWithPabee( BertPreTrainedModel ):
def __init__( self : Optional[Any] , config : str):
"""simple docstring"""
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModelWithPabee(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifiers = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels) for _ in range(config.num_hidden_layers)])
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def forward( self : List[str] , input_ids : Optional[Any]=None , attention_mask : Optional[int]=None , token_type_ids : Union[str, Any]=None , position_ids : Union[str, Any]=None , head_mask : List[str]=None , inputs_embeds : List[Any]=None , labels : List[str]=None , ):
"""simple docstring"""
logits = self.bert(
input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
outputs = (logits[-1],)
if labels is not None:
total_loss = None
total_weights = 0
for ix, logits_item in enumerate(logits):
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits_item.view(-1) , labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits_item.view(-1 , self.num_labels) , labels.view(-1))
if total_loss is None:
total_loss = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
outputs = (total_loss / total_weights,) + outputs
return outputs
| 313 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class BitConfig(BackboneConfigMixin , PretrainedConfig ):
"""simple docstring"""
model_type = 'bit'
layer_types = ['preactivation', 'bottleneck']
supported_padding = ['SAME', 'VALID']
def __init__( self ,num_channels=3 ,embedding_size=6_4 ,hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] ,depths=[3, 4, 6, 3] ,layer_type="preactivation" ,hidden_act="relu" ,global_padding=None ,num_groups=3_2 ,drop_path_rate=0.0 ,embedding_dynamic_padding=False ,output_stride=3_2 ,width_factor=1 ,out_features=None ,out_indices=None ,**kwargs ,):
super().__init__(**kwargs )
if layer_type not in self.layer_types:
raise ValueError(F"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
global_padding = global_padding.upper()
else:
raise ValueError(F"Padding strategy {global_padding} not supported" )
self.num_channels = num_channels
self.embedding_size = embedding_size
self.hidden_sizes = hidden_sizes
self.depths = depths
self.layer_type = layer_type
self.hidden_act = hidden_act
self.global_padding = global_padding
self.num_groups = num_groups
self.drop_path_rate = drop_path_rate
self.embedding_dynamic_padding = embedding_dynamic_padding
self.output_stride = output_stride
self.width_factor = width_factor
self.stage_names = ['''stem'''] + [F"stage{idx}" for idx in range(1 ,len(depths ) + 1 )]
self._out_features , self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features ,out_indices=out_indices ,stage_names=self.stage_names )
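# A minimal instantiation sketch for the config above (default-style values):
#
#   config = BitConfig(layer_type="preactivation", global_padding="SAME")
#   # invalid choices fail fast, e.g.:
#   #   BitConfig(layer_type="postactivation")  -> ValueError
#   #   BitConfig(global_padding="REFLECT")     -> ValueError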
| 104 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
return image
def create_rename_keys(config ):
rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
val = dct.pop(old )
dct[new] = val
def read_in_q_v_bias(state_dict , config ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
q_bias = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""" )
v_bias = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
state_dict[f"""visual_encoder.blocks.{i}.attn.qkv.bias"""] = qkv_bias
def get_blipa_config(model_name , eos_token_id ):
image_size = 3_64 if 'coco' in model_name else 2_24
vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
text_config = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=eos_token_id ).to_dict()
elif "opt-6.7b" in model_name:
text_config = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=eos_token_id ).to_dict()
elif "t5-xl" in model_name:
text_config = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
text_config = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
config = BlipaConfig(vision_config=vision_config , text_config=text_config )
return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
__SCREAMING_SNAKE_CASE = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
__SCREAMING_SNAKE_CASE = tokenizer('\n' , add_special_tokens=a__ ).input_ids[0]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_blipa_config(a__ , eos_token_id=a__ )
__SCREAMING_SNAKE_CASE = BlipaForConditionalGeneration(a__ ).eval()
__SCREAMING_SNAKE_CASE = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
__SCREAMING_SNAKE_CASE = 'cuda' if torch.cuda.is_available() else 'cpu'
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = load_model_and_preprocess(
name=a__ , model_type=a__ , is_eval=a__ , device=a__ )
original_model.eval()
print('Done!' )
# update state dict keys
__SCREAMING_SNAKE_CASE = original_model.state_dict()
__SCREAMING_SNAKE_CASE = create_rename_keys(a__ )
for src, dest in rename_keys:
rename_key(a__ , a__ , a__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__SCREAMING_SNAKE_CASE = state_dict.pop(a__ )
if key.startswith('Qformer.bert' ):
__SCREAMING_SNAKE_CASE = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
__SCREAMING_SNAKE_CASE = key.replace('self' , 'attention' )
if "opt_proj" in key:
__SCREAMING_SNAKE_CASE = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
__SCREAMING_SNAKE_CASE = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
__SCREAMING_SNAKE_CASE = key.replace('opt' , 'language' )
if key.startswith('t5' ):
__SCREAMING_SNAKE_CASE = key.replace('t5' , 'language' )
__SCREAMING_SNAKE_CASE = val
# read in qv biases
read_in_q_v_bias(a__ , a__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = hf_model.load_state_dict(a__ , strict=a__ )
assert len(a__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__SCREAMING_SNAKE_CASE = load_demo_image()
__SCREAMING_SNAKE_CASE = vis_processors['eval'](a__ ).unsqueeze(0 ).to(a__ )
__SCREAMING_SNAKE_CASE = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(a__ )
# create processor
__SCREAMING_SNAKE_CASE = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=a__ , image_std=a__ )
__SCREAMING_SNAKE_CASE = BlipaProcessor(image_processor=a__ , tokenizer=a__ )
__SCREAMING_SNAKE_CASE = processor(images=a__ , return_tensors='pt' ).pixel_values.to(a__ )
# make sure processor creates exact same pixel values
assert torch.allclose(a__ , a__ )
original_model.to(a__ )
hf_model.to(a__ )
with torch.no_grad():
if "opt" in model_name:
__SCREAMING_SNAKE_CASE = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
__SCREAMING_SNAKE_CASE = hf_model(a__ , a__ ).logits
else:
__SCREAMING_SNAKE_CASE = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
__SCREAMING_SNAKE_CASE = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
__SCREAMING_SNAKE_CASE = hf_model(a__ , a__ , labels=a__ ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=a__ )
assert torch.allclose(logits[0, :3, :3] , a__ , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=a__ )
else:
# cast to same type
__SCREAMING_SNAKE_CASE = logits.dtype
assert torch.allclose(original_logits.to(a__ ) , a__ , atol=1E-2 )
print('Looks ok!' )
print('Generating a caption...' )
__SCREAMING_SNAKE_CASE = ''
__SCREAMING_SNAKE_CASE = tokenizer(a__ , return_tensors='pt' ).input_ids.to(a__ )
__SCREAMING_SNAKE_CASE = original_model.generate({'image': original_pixel_values} )
__SCREAMING_SNAKE_CASE = hf_model.generate(
a__ , a__ , do_sample=a__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , a__ )
__SCREAMING_SNAKE_CASE = input_ids.shape[1]
__SCREAMING_SNAKE_CASE = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=a__ )
__SCREAMING_SNAKE_CASE = [text.strip() for text in output_text]
print('HF generation:' , a__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(a__ )
hf_model.save_pretrained(a__ )
if push_to_hub:
processor.push_to_hub(f"""nielsr/{model_name}""" )
hf_model.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
choices = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
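# Example invocation (hypothetical script name and paths, for illustration only):
#   python convert_blip2_checkpoint.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b-converted \
#       --push_to_hub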
| 257 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig( PretrainedConfig ):
model_type = "vit_msn"
def __init__(self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-06 , image_size=2_24 , patch_size=16 , num_channels=3 , qkv_bias=True , **kwargs , ) -> Tuple:
super().__init__(**kwargs )
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
| 364 |
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class SqlDatasetReader( AbstractDatasetInputStream ):
def __init__(self , sql , con , features = None , cache_dir = None , keep_in_memory = False , **kwargs ):
super().__init__(features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , **kwargs )
self.builder = Sql(
cache_dir=cache_dir , features=features , sql=sql , con=con , **kwargs , )
def read(self ) -> Dataset:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , )
# Build dataset for splits
dataset = self.builder.as_dataset(
split="train" , verification_mode=verification_mode , in_memory=self.keep_in_memory )
return dataset
class SqlDatasetWriter:
def __init__(self , dataset , name , con , batch_size = None , num_proc = None , **to_sql_kwargs ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F"num_proc {num_proc} must be an integer > 0." )
self.dataset = dataset
self.name = name
self.con = con
self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
self.num_proc = num_proc
self.to_sql_kwargs = to_sql_kwargs
def write(self ) -> int:
_ = self.to_sql_kwargs.pop("sql" , None )
_ = self.to_sql_kwargs.pop("con" , None )
index = self.to_sql_kwargs.pop("index" , False )
written = self._write(index=index , **self.to_sql_kwargs )
return written
def _batch_sql(self , args ):
offset , index , to_sql_kwargs = args
to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
batch = query_table(
table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
df = batch.to_pandas()
num_rows = df.to_sql(self.name , self.con , index=index , **to_sql_kwargs )
return num_rows or len(df )
def _write(self , index , **to_sql_kwargs ) -> int:
written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
num_rows , batch_size = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += num_rows
return written
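# A minimal usage sketch for the writer above (assumes an in-memory SQLite
# connection and a tiny dataset; names are illustrative, not from the source):
def _demo_sql_roundtrip():
    import sqlite3
    ds = Dataset.from_dict({"id": [1, 2], "text": ["a", "b"]})
    con = sqlite3.connect(":memory:")
    written = SqlDatasetWriter(ds, "demo_table", con).write()
    assert written == 2  # two rows written to the "demo_table" table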
| 244 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Tuple = logging.get_logger(__name__)
A : Dict = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig( PretrainedConfig ):
'''simple docstring'''
model_type = """lxmert"""
attribute_map = {}
def __init__( self , __a=3_05_22 , __a=7_68 , __a=12 , __a=95_00 , __a=16_00 , __a=4_00 , __a=30_72 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=2 , __a=0.0_2 , __a=1e-1_2 , __a=9 , __a=5 , __a=5 , __a=20_48 , __a=4 , __a=6.6_7 , __a=True , __a=True , __a=True , __a=True , __a=True , __a=True , __a=True , **__a , ):
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = num_qa_labels
__lowerCAmelCase = num_object_labels
__lowerCAmelCase = num_attr_labels
__lowerCAmelCase = l_layers
__lowerCAmelCase = x_layers
__lowerCAmelCase = r_layers
__lowerCAmelCase = visual_feat_dim
__lowerCAmelCase = visual_pos_dim
__lowerCAmelCase = visual_loss_normalizer
__lowerCAmelCase = task_matched
__lowerCAmelCase = task_mask_lm
__lowerCAmelCase = task_obj_predict
__lowerCAmelCase = task_qa
__lowerCAmelCase = visual_obj_loss
__lowerCAmelCase = visual_attr_loss
__lowerCAmelCase = visual_feat_loss
__lowerCAmelCase = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
super().__init__(**__UpperCAmelCase )
| 57 |
"""simple docstring"""
def kinetic_energy( mass: float , velocity: float ) -> float:
    '''
    Calculate the kinetic energy of an object: KE = (1/2) * m * v**2.

    >>> kinetic_energy(10, 5)
    125.0
    >>> kinetic_energy(10, -5)
    125.0
    '''
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative" )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
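# Worked example: a 10 kg mass moving at 5 m/s (in either direction) carries
# 0.5 * 10 * 5 * 5 = 125 J of kinetic energy; speed enters squared, so the
# sign of the velocity never matters (hence the abs() calls above).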
| 40 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
tokenizer_class = MgpstrTokenizer
test_rust_tokenizer = False
from_pretrained_kwargs = {}
test_seq2seq = False
def setUp( self : Dict ):
'''simple docstring'''
super().setUp()
# fmt: off
_A = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
_A = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + "\n" )
def get_tokenizer( self : int , **__UpperCAmelCase : Tuple ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def get_input_output_texts( self : Dict , __UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
_A = "tester"
_A = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : int ):
'''simple docstring'''
_A = self.get_tokenizers(do_lower_case=__UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_A = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
_A = tokenizer.encode([special_token] , add_special_tokens=__UpperCAmelCase )
self.assertEqual(len(__UpperCAmelCase ) , 1 )
_A = tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
self.assertTrue(special_token not in decoded )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
_A = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_A , _A = self.get_input_output_texts(__UpperCAmelCase )
_A = tokenizer.tokenize(__UpperCAmelCase )
_A = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
_A = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_A = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertNotEqual(len(__UpperCAmelCase ) , 0 )
_A = tokenizer.decode(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(text_a.replace(" " , "" ) , __UpperCAmelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
pass
| 174 |
'''simple docstring'''
import os
lowerCamelCase_ = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00}
def parse_roman_numerals(numerals: str ) -> int:
    '''Convert a Roman numeral string into an integer.'''
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int ) -> str:
    '''Generate the minimal Roman numeral representation of an integer.'''
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt" ) -> int:
    '''Count the characters saved by rewriting each numeral minimally.'''
    savings = 0
    with open(os.path.dirname(__file__ ) + roman_numerals_filename ) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original )
        shorter = generate_roman_numerals(num )
        savings += len(original ) - len(shorter )
    return savings
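# Round-trip sanity check for the helpers above (illustrative):
def _demo_roman_roundtrip():
    assert parse_roman_numerals("XIX") == 19
    assert generate_roman_numerals(19) == "XIX"
    # non-minimal input, minimal output -- this is exactly the saving counted:
    assert parse_roman_numerals("VIIII") == 9
    assert generate_roman_numerals(9) == "IX"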
if __name__ == "__main__":
print(F"""{solution() = }""")
| 174 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds( train_file: str , eval_file: str , test_file: str , tokenizer: PreTrainedTokenizer , label_column_id: int , max_seq_length: Optional[int] = None , ):
UpperCamelCase__ : Dict = {}
if train_file is not None:
UpperCamelCase__ : str = [train_file]
if eval_file is not None:
UpperCamelCase__ : Union[str, Any] = [eval_file]
if test_file is not None:
UpperCamelCase__ : Tuple = [test_file]
UpperCamelCase__ : Optional[Any] = datasets.load_dataset('''csv''' , data_files=__UpperCAmelCase )
UpperCamelCase__ : List[Any] = list(ds[list(files.keys() )[0]].features.keys() )
UpperCamelCase__ : str = features_name.pop(__UpperCAmelCase )
UpperCamelCase__ : List[str] = list(set(ds[list(files.keys() )[0]][label_name] ) )
UpperCamelCase__ : Optional[Any] = {label: i for i, label in enumerate(__UpperCAmelCase )}
UpperCamelCase__ : Union[str, Any] = tokenizer.model_input_names
UpperCamelCase__ : str = {}
if len(__UpperCAmelCase ) == 1:
for k in files.keys():
UpperCamelCase__ : Optional[int] = ds[k].map(
lambda __UpperCAmelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' ) , batched=__UpperCAmelCase , )
elif len(__UpperCAmelCase ) == 2:
for k in files.keys():
UpperCamelCase__ : Dict = ds[k].map(
lambda __UpperCAmelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' , ) , batched=__UpperCAmelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
UpperCamelCase__ : Any = {k: v for k, v in ex.items() if k in input_names}
UpperCamelCase__ : str = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
UpperCamelCase__ : Union[str, Any] = {k: v for k, v in ex.items() if k in input_names}
UpperCamelCase__ : str = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
UpperCamelCase__ : Optional[Any] = {k: v for k, v in ex.items() if k in input_names}
UpperCamelCase__ : int = labelaid[ex[label_name]]
yield (d, label)
UpperCamelCase__ : Tuple = (
tf.data.Dataset.from_generator(
__UpperCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
UpperCamelCase__ : Optional[Any] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
UpperCamelCase__ : int = (
tf.data.Dataset.from_generator(
__UpperCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
UpperCamelCase__ : Dict = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
UpperCamelCase__ : Optional[Any] = (
tf.data.Dataset.from_generator(
__UpperCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
UpperCamelCase__ : Union[str, Any] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
'''simple docstring'''
label_column_id : int = field(metadata={"help": "Which column contains the label"} )
train_file : str = field(default=None , metadata={"help": "The path of the training file"} )
dev_file : Optional[str] = field(default=None , metadata={"help": "The path of the development file"} )
test_file : Optional[str] = field(default=None , metadata={"help": "The path of the test file"} )
max_seq_length : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
overwrite_cache : bool = field(
default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class ModelArguments:
'''simple docstring'''
model_name_or_path : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
config_name : Optional[str] = field(
default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
tokenizer_name : Optional[str] = field(
default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
use_fast : bool = field(default=False , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
cache_dir : Optional[str] = field(
default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, "
f"16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
train_dataset , eval_dataset , test_ds , labelaid = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=tokenizer , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(labelaid ) , label2id=labelaid , id2label={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
model = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
def compute_metrics(p: EvalPrediction ) -> Dict:
preds = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
trainer = TFTrainer(
model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
results = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
result = trainer.evaluate()
output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(output_eval_file , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(f" {key} = {value}" )
writer.write(f"{key} = {value}\n" )
results.update(result )
return results
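# Example invocation (hypothetical CSV paths, for illustration only):
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file train.csv --dev_file dev.csv --test_file test.csv \
#       --label_column_id 0 --output_dir ./model --do_train --do_eval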
if __name__ == "__main__":
main()
| 201 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig( PretrainedConfig ):
'''simple docstring'''
model_type = "informer"
attribute_map = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self, __magic_name__ = None, __magic_name__ = None, __magic_name__ = "student_t", __magic_name__ = "nll", __magic_name__ = 1, __magic_name__ = None, __magic_name__ = "mean", __magic_name__ = 0, __magic_name__ = 0, __magic_name__ = 0, __magic_name__ = 0, __magic_name__ = None, __magic_name__ = None, __magic_name__ = 64, __magic_name__ = 32, __magic_name__ = 32, __magic_name__ = 2, __magic_name__ = 2, __magic_name__ = 2, __magic_name__ = 2, __magic_name__ = True, __magic_name__ = "gelu", __magic_name__ = 0.05, __magic_name__ = 0.1, __magic_name__ = 0.1, __magic_name__ = 0.1, __magic_name__ = 0.1, __magic_name__ = 100, __magic_name__ = 0.02, __magic_name__=True, __magic_name__ = "prob", __magic_name__ = 5, __magic_name__ = True, **__magic_name__, ) -> Optional[int]:
"""simple docstring"""
# time series specific configuration
UpperCamelCase__ : List[Any] = prediction_length
UpperCamelCase__ : Any = context_length or prediction_length
UpperCamelCase__ : Optional[int] = distribution_output
UpperCamelCase__ : Union[str, Any] = loss
UpperCamelCase__ : Optional[Any] = input_size
UpperCamelCase__ : Dict = num_time_features
UpperCamelCase__ : Dict = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
UpperCamelCase__ : Optional[int] = scaling
UpperCamelCase__ : Any = num_dynamic_real_features
UpperCamelCase__ : Optional[int] = num_static_real_features
UpperCamelCase__ : Optional[Any] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(__magic_name__ ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
UpperCamelCase__ : str = cardinality
else:
UpperCamelCase__ : int = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(__magic_name__ ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
UpperCamelCase__ : Any = embedding_dimension
else:
UpperCamelCase__ : Union[str, Any] = [min(50, (cat + 1) // 2 ) for cat in self.cardinality]
UpperCamelCase__ : Optional[Any] = num_parallel_samples
# Transformer architecture configuration
UpperCamelCase__ : Union[str, Any] = input_size * len(self.lags_sequence ) + self._number_of_features
UpperCamelCase__ : Optional[Any] = d_model
UpperCamelCase__ : Tuple = encoder_attention_heads
UpperCamelCase__ : Any = decoder_attention_heads
UpperCamelCase__ : Dict = encoder_ffn_dim
UpperCamelCase__ : Optional[Any] = decoder_ffn_dim
UpperCamelCase__ : str = encoder_layers
UpperCamelCase__ : Optional[int] = decoder_layers
UpperCamelCase__ : Optional[Any] = dropout
UpperCamelCase__ : List[Any] = attention_dropout
UpperCamelCase__ : Any = activation_dropout
UpperCamelCase__ : Optional[int] = encoder_layerdrop
UpperCamelCase__ : Union[str, Any] = decoder_layerdrop
UpperCamelCase__ : Tuple = activation_function
UpperCamelCase__ : List[str] = init_std
UpperCamelCase__ : int = use_cache
# Informer
UpperCamelCase__ : Optional[int] = attention_type
UpperCamelCase__ : Optional[int] = sampling_factor
UpperCamelCase__ : Dict = distil
super().__init__(is_encoder_decoder=__magic_name__, **__magic_name__ )
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
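def _example_informer_config():
    # Added usage sketch: builds a small time-series config with the class
    # defined above; the numbers are arbitrary examples.
    config = InformerConfig(
        prediction_length=24,  # forecast horizon
        context_length=48,  # history length fed to the encoder
        num_time_features=2,
        lags_sequence=[1, 2, 3],
    )
    # feature_size = input_size * len(lags_sequence) + _number_of_features
    return config.feature_size, config.d_model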
| 201 | 1 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor a square matrix into L (unit lower
    triangular) and U (upper triangular) such that table = L @ U."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
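def _demo_lu() -> None:
    # Added usage sketch: decompose a small matrix (no pivoting is done, so the
    # leading minors must be non-singular) and verify that L @ U reconstructs it.
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    assert np.allclose(lower @ upper, matrix)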
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 354 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _snake_case ( a__ , unittest.TestCase ):
snake_case__ = DiTPipeline
snake_case__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
snake_case__ = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
snake_case__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
snake_case__ = False
def lowerCamelCase__ ( self : Tuple ):
torch.manual_seed(0 )
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any]=0 ):
if str(UpperCAmelCase ).startswith("mps" ):
__lowerCamelCase : List[str] = torch.manual_seed(UpperCAmelCase )
else:
__lowerCamelCase : List[str] = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
__lowerCamelCase : str = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def lowerCamelCase__ ( self : Optional[Any] ):
__lowerCamelCase : Dict = "cpu"
__lowerCamelCase : int = self.get_dummy_components()
__lowerCamelCase : Optional[Any] = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
__lowerCamelCase : Optional[int] = self.get_dummy_inputs(UpperCAmelCase )
__lowerCamelCase : List[Any] = pipe(**UpperCAmelCase ).images
__lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__lowerCamelCase : Optional[int] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
__lowerCamelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase , 1E-3 )
def lowerCamelCase__ ( self : Any ):
self._test_inference_batch_single_identical(relax_max_difference=UpperCAmelCase , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowerCamelCase__ ( self : List[str] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
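# Added, hedged sketch of the workflow these tests exercise: class-conditional
# image generation with DiTPipeline. It needs downloaded weights and a CUDA
# device, so it is illustrative only and not part of the test suite.
def _example_dit_inference():
    pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
    pipe.to("cuda")
    class_ids = pipe.get_label_ids(["white shark"])  # ImageNet words -> class ids
    generator = torch.manual_seed(0)
    return pipe(class_ids, generator=generator, num_inference_steps=25, output_type="np").images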
@require_torch_gpu
@slow
class _snake_case ( unittest.TestCase ):
def lowerCamelCase__ ( self : Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : List[str] ):
__lowerCamelCase : Optional[int] = torch.manual_seed(0 )
__lowerCamelCase : str = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
__lowerCamelCase : Tuple = ["vase", "umbrella", "white shark", "white wolf"]
__lowerCamelCase : Optional[int] = pipe.get_label_ids(UpperCAmelCase )
__lowerCamelCase : Optional[Any] = pipe(UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(UpperCAmelCase , UpperCAmelCase ):
__lowerCamelCase : Dict = load_numpy(
F"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : Tuple = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
__lowerCamelCase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
__lowerCamelCase : Union[str, Any] = ["vase", "umbrella"]
__lowerCamelCase : int = pipe.get_label_ids(UpperCAmelCase )
__lowerCamelCase : Dict = torch.manual_seed(0 )
__lowerCamelCase : Dict = pipe(UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(UpperCAmelCase , UpperCAmelCase ):
__lowerCamelCase : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
F"""/dit/{word}_512.npy""" )
            assert np.abs((expected_image - image).max() ) < 1E-1
| 64 | 0 |
class TrieNode:
    def __init__(self) -> None:
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())
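def _usage_sketch() -> None:
    # Added usage sketch for TrieNode beyond the assertions in test_trie().
    root = TrieNode()
    root.insert_many(["car", "card", "care"])
    assert root.find("card")
    assert not root.find("ca")  # "ca" is only a prefix, not an inserted word
    root.delete("card")
    assert not root.find("card") and root.find("car")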
if __name__ == "__main__":
main()
| 231 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
__UpperCAmelCase : Optional[int] = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
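def _example_ibert_config():
    # Added, hedged usage sketch: `quant_mode=True` switches I-BERT to its
    # integer-only code paths; `force_dequant` selectively disables them.
    config = IBertConfig(quant_mode=True)
    return config.model_type, config.quant_mode, config.force_dequant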
| 111 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
a_ : List[Any] = logging.get_logger(__name__)
a_ : int = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ : List[Any] = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
a_ : List[str] = {
'squeezebert/squeezebert-uncased': 5_12,
'squeezebert/squeezebert-mnli': 5_12,
'squeezebert/squeezebert-mnli-headless': 5_12,
}
a_ : List[Any] = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
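def _special_tokens_sketch():
    # Added illustration of the layouts the two methods above produce, using
    # made-up token ids in place of a real vocabulary.
    cls_id, sep_id = 101, 102
    token_ids_0, token_ids_1 = [7, 8, 9], [20, 21]
    single = [cls_id] + token_ids_0 + [sep_id]  # [CLS] A [SEP]
    pair = single + token_ids_1 + [sep_id]  # [CLS] A [SEP] B [SEP]
    token_type_ids = [0] * len(single) + [1] * (len(token_ids_1) + 1)
    assert len(pair) == len(token_type_ids)
    return pair, token_type_ids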
| 360 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
a_ : Union[str, Any] = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    # Rescale + pad-only image processor (this matches the Swin2SR image
    # processor in transformers; the original class name was obfuscated).
    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale=True, rescale_factor=1 / 255, do_pad=True, pad_size=8, **kwargs) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image, size, data_format=None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images,
        do_rescale=None,
        rescale_factor=None,
        do_pad=None,
        pad_size=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
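def _pad_rule_sketch() -> None:
    # Added illustration of the padding rule used by `pad` above: each side is
    # grown to the next multiple of `size`. Note the rule adds a full extra
    # block when a side is already a multiple, matching the code above.
    size = 8
    old_height, old_width = 21, 32
    pad_height = (old_height // size + 1) * size - old_height
    pad_width = (old_width // size + 1) * size - old_width
    assert (old_height + pad_height) % size == 0 and pad_height == 3
    assert (old_width + pad_width) % size == 0 and pad_width == 8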
| 327 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase__ : int = {
'''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''},
'''tokenizer_file''': {
'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'''
},
}
lowerCamelCase__ : List[Any] = {'''mobilebert-uncased''': 5_12}
lowerCamelCase__ : List[str] = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 246 |
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def carrier_concentration(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """Solve sigma = q * n * mu for whichever of the three quantities is
    passed in as 0 (exactly one must be 0)."""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif conductivity < 0:
raise ValueError('Conductivity cannot be negative' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative' )
elif mobility < 0:
raise ValueError('mobility cannot be negative' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
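def _demo() -> None:
    # Added usage sketch: pass 0 for the unknown quantity and the function
    # solves sigma = q * n * mu for it.
    name, value = carrier_concentration(conductivity=25.0, electron_conc=0, mobility=100.0)
    assert name == "electron_conc"  # n = 25 / (100 * 1.6021e-19)
    print(name, value)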
if __name__ == "__main__":
import doctest
doctest.testmod()
| 293 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 354 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
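def _example_caption(path: str) -> str:
    # Added, hedged usage sketch: requires the `vision` extra, Pillow, and a
    # download of the BLIP checkpoint, so it is illustrative only.
    from PIL import Image

    tool = ImageCaptioningTool()
    tool.setup()  # instantiate the processor and model up front
    return tool(Image.open(path))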
| 183 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
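# Added note: this script is meant to be launched with multiple processes, e.g.
#   accelerate launch --num_processes 2 path/to/this_script.py
# For two processes, create_tensor yields [1., 2.] on rank 0 and [3., 4.] on
# rank 1, so gather() returns [1., 2., 3., 4.] on every rank. The helper below
# replays that arithmetic on a single process.
def _gather_arithmetic_check(num_processes: int = 2) -> None:
    expected = []
    for rank in range(num_processes):
        expected.extend((torch.arange(num_processes) + 1.0 + num_processes * rank).tolist())
    assert expected == [float(i) for i in range(1, num_processes**2 + 1)]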
| 139 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector):
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 139 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __lowerCAmelCase :
def __init__( self :List[Any] , __magic_name__ :Union[str, Any] , __magic_name__ :Any=2 , __magic_name__ :Dict=True , __magic_name__ :Dict=False , __magic_name__ :Dict=10 , __magic_name__ :Optional[Any]=3 , __magic_name__ :Tuple=32 * 8 , __magic_name__ :List[Any]=32 * 8 , __magic_name__ :Dict=4 , __magic_name__ :List[str]=64 , ):
'''simple docstring'''
a = parent
a = batch_size
a = is_training
a = use_auxiliary_loss
a = num_queries
a = num_channels
a = min_size
a = max_size
a = num_labels
a = hidden_dim
a = hidden_dim
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__magic_name__ )
a = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__magic_name__ )
a = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__magic_name__ ) > 0.5
).float()
a = (torch.rand((self.batch_size, self.num_labels) , device=__magic_name__ ) > 0.5).long()
a = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
a = self.num_queries
a = self.num_labels
a = [1, 1, 1, 1]
a = self.num_channels
a = 64
a = 128
a = self.hidden_dim
a = self.hidden_dim
a = self.hidden_dim
return config
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a , a , a , a , a = self.prepare_config_and_inputs()
a = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def lowerCamelCase__ ( self :int , __magic_name__ :Union[str, Any] , __magic_name__ :Dict ):
'''simple docstring'''
a = output.encoder_hidden_states
a = output.pixel_decoder_hidden_states
a = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__magic_name__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__magic_name__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__magic_name__ ) , config.decoder_layers )
def lowerCamelCase__ ( self :Dict , __magic_name__ :str , __magic_name__ :str , __magic_name__ :Union[str, Any] , __magic_name__ :Tuple=False ):
'''simple docstring'''
with torch.no_grad():
a = MaskaFormerModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a = model(pixel_values=__magic_name__ , pixel_mask=__magic_name__ )
a = model(__magic_name__ , output_hidden_states=__magic_name__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__magic_name__ , __magic_name__ )
def lowerCamelCase__ ( self :int , __magic_name__ :List[str] , __magic_name__ :str , __magic_name__ :Optional[Any] , __magic_name__ :List[Any] , __magic_name__ :Tuple ):
'''simple docstring'''
a = MaskaFormerForUniversalSegmentation(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
def comm_check_on_output(__magic_name__ :Union[str, Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
a = model(pixel_values=__magic_name__ , pixel_mask=__magic_name__ )
a = model(__magic_name__ )
comm_check_on_output(__magic_name__ )
a = model(
pixel_values=__magic_name__ , pixel_mask=__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ )
comm_check_on_output(__magic_name__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
UpperCamelCase__ = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = MaskaFormerModelTester(self )
a = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__magic_name__ , **__magic_name__ , output_hidden_states=__magic_name__ )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__magic_name__ )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(__magic_name__ )
a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a = [*signature.parameters.keys()]
a = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
@slow
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
a = MaskaFormerModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = (self.model_tester.min_size,) * 2
a = {
"""pixel_values""": torch.randn((2, 3, *size) , device=__magic_name__ ),
"""mask_labels""": torch.randn((2, 10, *size) , device=__magic_name__ ),
"""class_labels""": torch.zeros(2 , 10 , device=__magic_name__ ).long(),
}
a = self.model_tester.get_config()
a = MaskaFormerForUniversalSegmentation(__magic_name__ ).to(__magic_name__ )
a = model(**__magic_name__ )
self.assertTrue(outputs.loss is not None )
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__magic_name__ , **__magic_name__ , output_hidden_states=__magic_name__ )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(__magic_name__ ).to(__magic_name__ )
a = model(**__magic_name__ , output_attentions=__magic_name__ )
self.assertTrue(outputs.attentions is not None )
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
if not self.model_tester.is_training:
return
a = self.all_model_classes[1]
a , a , a , a , a = self.model_tester.prepare_config_and_inputs()
a = model_class(__magic_name__ )
model.to(__magic_name__ )
model.train()
a = model(__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ ).loss
loss.backward()
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
a = self.all_model_classes[1]
a , a , a , a , a = self.model_tester.prepare_config_and_inputs()
a = True
a = True
a = model_class(__magic_name__ ).to(__magic_name__ )
model.train()
a = model(__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ )
a = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
a = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
a = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
a = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__magic_name__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__UpperCamelCase : List[str] = 1E-4
def __A ( ) -> Dict:
a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__magic_name__ )
a = self.default_image_processor
a = prepare_img()
a = image_processor(__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
a = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__magic_name__ , (1, 3, 384, 384) )
with torch.no_grad():
a = model(**__magic_name__ )
a = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(__magic_name__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
a = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(__magic_name__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
a = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(__magic_name__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__magic_name__ ).eval()
a = self.default_image_processor
a = prepare_img()
a = image_processor(__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
a = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__magic_name__ , (1, 3, 384, 384) )
with torch.no_grad():
a = model(**__magic_name__ )
# masks_queries_logits
a = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
a = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
a = torch.tensor(__magic_name__ ).to(__magic_name__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
# class_queries_logits
a = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
a = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__magic_name__ ).eval()
a = self.default_image_processor
a = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , )
a = inputs["""pixel_values"""].to(__magic_name__ )
a = [el.to(__magic_name__ ) for el in inputs["""mask_labels"""]]
a = [el.to(__magic_name__ ) for el in inputs["""class_labels"""]]
with torch.no_grad():
a = model(**__magic_name__ )
self.assertTrue(outputs.loss is not None )
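# Added, hedged end-to-end sketch of what these tests cover, using the real
# transformers class names (`Mask2Former...`), which the obfuscated
# `MaskaFormer...` identifiers above stand for. Needs downloaded weights;
# `image` is assumed to be a PIL image.
def _example_mask2former_inference(image):
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerImageProcessor

    checkpoint = "facebook/mask2former-swin-small-coco-instance"
    processor = Mask2FormerImageProcessor.from_pretrained(checkpoint)
    model = Mask2FormerForUniversalSegmentation.from_pretrained(checkpoint)
    inputs = processor(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # Post-process the query logits into per-instance masks at the input size.
    return processor.post_process_instance_segmentation(outputs, target_sizes=[image.size[::-1]])[0]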
| 347 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
__UpperCamelCase : Any = datasets.utils.logging.get_logger(__name__)
@dataclass
class __lowerCAmelCase ( datasets.BuilderConfig ):
UpperCamelCase__ = None
UpperCamelCase__ = "utf-8"
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = True # deprecated
UpperCamelCase__ = None # deprecated
UpperCamelCase__ = 10 << 20 # 10MB
UpperCamelCase__ = None
class __lowerCAmelCase ( datasets.ArrowBasedBuilder ):
UpperCamelCase__ = JsonConfig
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
if self.config.block_size is not None:
logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
a = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
def lowerCamelCase__ ( self :Tuple , __magic_name__ :str ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
a = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__magic_name__ , (str, list, tuple) ):
a = data_files
if isinstance(__magic_name__ , __magic_name__ ):
a = [files]
a = [dl_manager.iter_files(__magic_name__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
a = []
for split_name, files in data_files.items():
if isinstance(__magic_name__ , __magic_name__ ):
a = [files]
a = [dl_manager.iter_files(__magic_name__ ) for file in files]
splits.append(datasets.SplitGenerator(name=__magic_name__ , gen_kwargs={"""files""": files} ) )
return splits
def lowerCamelCase__ ( self :List[str] , __magic_name__ :pa.Table ):
'''simple docstring'''
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
a = self.config.features.arrow_schema.field(__magic_name__ ).type
a = pa_table.append_column(__magic_name__ , pa.array([None] * len(__magic_name__ ) , type=__magic_name__ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
a = table_cast(__magic_name__ , self.config.features.arrow_schema )
return pa_table
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Union[str, Any] ):
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(__magic_name__ ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(__magic_name__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
a = json.load(__magic_name__ )
# We keep only the field we are interested in
a = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(__magic_name__ , (list, tuple) ):
a = set().union(*[row.keys() for row in dataset] )
a = {col: [row.get(__magic_name__ ) for row in dataset] for col in keys}
else:
a = dataset
a = pa.Table.from_pydict(__magic_name__ )
yield file_idx, self._cast_table(__magic_name__ )
# If the file has one json object per line
else:
with open(__magic_name__ , """rb""" ) as f:
a = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
a = max(self.config.chunksize // 32 , 16 << 10 )
a = (
self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
)
while True:
a = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(__magic_name__ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
a = batch.decode(self.config.encoding , errors=__magic_name__ ).encode("""utf-8""" )
try:
while True:
try:
a = paj.read_json(
io.BytesIO(__magic_name__ ) , read_options=paj.ReadOptions(block_size=__magic_name__ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(__magic_name__ , pa.ArrowInvalid )
and "straddling" not in str(__magic_name__ )
or block_size > len(__magic_name__ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F'Batch of {len(__magic_name__ )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
__magic_name__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
a = json.load(__magic_name__ )
except json.JSONDecodeError:
logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(__magic_name__ , __magic_name__ ): # list is the only sequence type supported in JSON
try:
a = set().union(*[row.keys() for row in dataset] )
a = {col: [row.get(__magic_name__ ) for row in dataset] for col in keys}
a = pa.Table.from_pydict(__magic_name__ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' )
raise ValueError(F'Not able to read records in the JSON file at {file}.' ) from None
yield file_idx, self._cast_table(__magic_name__ )
break
else:
logger.error(F'Failed to read file \'{file}\' with error {type(__magic_name__ )}: {e}' )
raise ValueError(
F'Not able to read records in the JSON file at {file}. '
F'You should probably indicate the field of the JSON file containing your records. '
F'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '
F'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__magic_name__ )
batch_idx += 1
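# Added, hedged usage sketch: the builder above is what backs
# `load_dataset("json", ...)`; the file paths here are placeholders.
def _example_load_json():
    from datasets import load_dataset

    # JSON Lines, one object per line:
    ds = load_dataset("json", data_files={"train": "train.jsonl"})
    # One JSON document whose records live under a top-level field:
    ds_field = load_dataset("json", data_files="dump.json", field="data")
    return ds, ds_field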
| 347 | 1 |
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : Tuple = {
'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'data2vec-audio'
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
| 269 |
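As a quick check of the convolutional settings above, the following hedged sketch (it assumes the class above is the one exposed as `Data2VecAudioConfig` in an installed `transformers`) instantiates the default config and recomputes the feature-extractor downsampling ratio that the `inputs_to_logits_ratio` property reports:

from transformers import Data2VecAudioConfig

config = Data2VecAudioConfig()  # all defaults, as in the __init__ above
# Each conv layer downsamples by its stride, so the overall ratio is their product.
ratio = 1
for stride in config.conv_stride:
    ratio *= stride
print(ratio)                          # 320 = 5 * 2 * 2 * 2 * 2 * 2 * 2
print(config.inputs_to_logits_ratio)  # the property computes the same product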
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 209 | 0 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10000000
CHAINS[0] = True  # 1 is in the chain that ends with 1
CHAINS[57] = False  # 58 is in the chain that ends with 89


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain


def solution(number: int = 10000000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
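A few spot checks of the chain logic above, assuming the module is imported as-is; the values are small enough to verify by hand:

assert next_number(44) == 32  # 4**2 + 4**2
assert next_number(32) == 13  # 3**2 + 2**2
assert chain(44) is True      # 44 -> 32 -> 13 -> 10 -> 1
assert chain(85) is False     # 85 -> 89 -> 145 -> ... loops back to 89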
| 102 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(
            num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_sde_ve_pipeline(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 102 | 1 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a boolean mask."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation of a binary image with the given structuring element."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
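A minimal sanity check of `dilation` above: a single centred pixel dilated with the 3x3 cross grows into a plus shape.

import numpy as np

dot = np.zeros((5, 5), dtype=int)
dot[2, 2] = 1
cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
print(dilation(dot, cross))
# ones at (1, 2), (2, 1), (2, 2), (2, 3) and (3, 2); zeros everywhere else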
| 260 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
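The stride settings above determine how many spectrogram patches the model embeds; this hedged sketch (it assumes the class is available as `transformers.ASTConfig` and mirrors the patch-grid arithmetic used by the model's embedding layer) works it out for the defaults:

from transformers import ASTConfig

config = ASTConfig()
frequency_out = (config.num_mel_bins - config.patch_size) // config.frequency_stride + 1
time_out = (config.max_length - config.patch_size) // config.time_stride + 1
print(frequency_out, time_out, frequency_out * time_out)  # 12 101 1212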
| 260 | 1 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 78 |
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Generate all k-combinations of the numbers 1..n."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
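Cross-check against the standard library, assuming the functions above: `itertools.combinations` over `range(1, n + 1)` must yield the same lists in the same lexicographic order.

from itertools import combinations

assert generate_all_combinations(4, 2) == [list(c) for c in combinations(range(1, 5), 2)]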
| 78 | 1 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
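A hedged usage sketch for the processor above; the checkpoint name is the public CLIP ViT-B/32 repo and the blank image is only a stand-in:

from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.new("RGB", (224, 224))
batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
print(list(batch.keys()))  # ['input_ids', 'attention_mask', 'pixel_values']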
| 183 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self : Tuple ) -> Optional[Any]:
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'apply_ocr' ) )
def UpperCamelCase ( self : Any ) -> Any:
lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def UpperCamelCase ( self : Dict ) -> Any:
pass
def UpperCamelCase ( self : int ) -> Dict:
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(encoding.boxes , __SCREAMING_SNAKE_CASE )
# Test batched
lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def UpperCamelCase ( self : Dict ) -> int:
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def UpperCamelCase ( self : Dict ) -> Any:
# with apply_OCR = True
lowerCamelCase_ = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowerCamelCase_ = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
lowerCamelCase_ = Image.open(ds[0]['file'] ).convert('RGB' )
lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCamelCase_ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
lowerCamelCase_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __SCREAMING_SNAKE_CASE )
self.assertListEqual(encoding.boxes , __SCREAMING_SNAKE_CASE )
# with apply_OCR = False
lowerCamelCase_ = LayoutLMvaImageProcessor(apply_ocr=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 183 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 359 |
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )

        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))


def example() -> int:
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
| 41 | 0 |
"""simple docstring"""
def knapsack(
    weights: list, values: list, number_of_items: int, max_weight: int, index: int
) -> int:
    """
    Recursive 0/1 knapsack: either skip the current item, or take it (when it
    still fits) and keep the better of the two totals.
    """
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
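A small worked example, assuming the `knapsack` above: with capacity 6, the best choice is the items weighing 2 and 3 with values 4 and 4.

weights = [4, 3, 2, 3]
values = [3, 2, 4, 4]
print(knapsack(weights, values, len(weights), 6, 0))  # 8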
| 194 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase_ = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Normalize the accepted video inputs to a list of lists of frames."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class _A ( _lowerCamelCase ):
_UpperCamelCase : str = ['''pixel_values''']
def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Optional[int] , ) -> None:
"""simple docstring"""
super().__init__(**_A )
lowercase : List[Any] = size if size is not None else {'''shortest_edge''': 224}
lowercase : Tuple = get_size_dict(_A , default_to_square=_A )
lowercase : Dict = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase : Dict = get_size_dict(_A , param_name='''crop_size''' )
lowercase : List[str] = do_resize
lowercase : Optional[Any] = size
lowercase : List[str] = do_center_crop
lowercase : List[Any] = crop_size
lowercase : str = resample
lowercase : Tuple = do_rescale
lowercase : Any = rescale_factor
lowercase : Tuple = do_normalize
lowercase : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray:
"""simple docstring"""
lowercase : Tuple = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" in size:
lowercase : Dict = get_resize_output_image_size(_A , size['''shortest_edge'''] , default_to_square=_A )
elif "height" in size and "width" in size:
lowercase : Union[str, Any] = (size['''height'''], size['''width'''])
else:
raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def __a ( self : Dict , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray:
"""simple docstring"""
lowercase : Optional[Any] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> Union[str, Any]:
"""simple docstring"""
return rescale(_A , scale=_A , data_format=_A , **_A )
def __a ( self : str , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def __a ( self : int , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
"""simple docstring"""
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase : Union[str, Any] = to_numpy_array(_A )
if do_resize:
lowercase : List[Any] = self.resize(image=_A , size=_A , resample=_A )
if do_center_crop:
lowercase : Optional[int] = self.center_crop(_A , size=_A )
if do_rescale:
lowercase : Tuple = self.rescale(image=_A , scale=_A )
if do_normalize:
lowercase : Union[str, Any] = self.normalize(image=_A , mean=_A , std=_A )
lowercase : Any = to_channel_dimension_format(_A , _A )
return image
def __a ( self : List[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Union[str, Any] , ) -> PIL.Image.Image:
"""simple docstring"""
lowercase : str = do_resize if do_resize is not None else self.do_resize
lowercase : Optional[Any] = resample if resample is not None else self.resample
lowercase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : str = do_rescale if do_rescale is not None else self.do_rescale
lowercase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean
lowercase : Optional[Any] = image_std if image_std is not None else self.image_std
lowercase : str = size if size is not None else self.size
lowercase : Any = get_size_dict(_A , default_to_square=_A )
lowercase : Optional[int] = crop_size if crop_size is not None else self.crop_size
lowercase : str = get_size_dict(_A , param_name='''crop_size''' )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
lowercase : Union[str, Any] = make_batched(_A )
lowercase : Dict = [
[
self._preprocess_image(
image=_A , do_resize=_A , size=_A , resample=_A , do_center_crop=_A , crop_size=_A , do_rescale=_A , rescale_factor=_A , do_normalize=_A , image_mean=_A , image_std=_A , data_format=_A , )
for img in video
]
for video in videos
]
lowercase : Tuple = {'''pixel_values''': videos}
        return BatchFeature(data=_A , tensor_type=_A )
| 308 | 0 |
def text_justification(word: str, max_width: int) -> list:
    """
    Wrap the words of ``word`` into lines of exactly ``max_width`` characters,
    padding with spaces so that every line is fully justified.
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
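A quick demonstration of `text_justification` above at a width of 16 characters; every returned row is exactly 16 characters long.

for row in text_justification("This is an example of text justification.", 16):
    print(repr(row))
# 'This    is    an'
# 'example  of text'
# 'justification.  '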
| 350 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 152 | 0 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_A = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__UpperCAmelCase )
_A = -1
_A = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__UpperCAmelCase )
_A = model.generate(__UpperCAmelCase , max_new_tokens=10 , do_sample=__UpperCAmelCase )
_A = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_A = TextStreamer(__UpperCAmelCase )
model.generate(__UpperCAmelCase , max_new_tokens=10 , do_sample=__UpperCAmelCase , streamer=__UpperCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_A = cs.out[:-1]
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
_A = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_A = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__UpperCAmelCase )
_A = -1
_A = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__UpperCAmelCase )
_A = model.generate(__UpperCAmelCase , max_new_tokens=10 , do_sample=__UpperCAmelCase )
_A = tokenizer.decode(greedy_ids[0] )
_A = TextIteratorStreamer(__UpperCAmelCase )
_A = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_A = Thread(target=model.generate , kwargs=__UpperCAmelCase )
thread.start()
_A = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_A = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__UpperCAmelCase )
_A = -1
_A = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__UpperCAmelCase )
_A = model.generate(__UpperCAmelCase , max_new_tokens=10 , do_sample=__UpperCAmelCase )
_A = greedy_ids[:, input_ids.shape[1] :]
_A = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_A = TextStreamer(__UpperCAmelCase , skip_prompt=__UpperCAmelCase )
model.generate(__UpperCAmelCase , max_new_tokens=10 , do_sample=__UpperCAmelCase , streamer=__UpperCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_A = cs.out[:-1]
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = AutoTokenizer.from_pretrained("distilgpt2" )
_A = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__UpperCAmelCase )
_A = -1
_A = torch.ones((1, 5) , device=__UpperCAmelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_A = TextStreamer(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
model.generate(__UpperCAmelCase , max_new_tokens=1 , do_sample=__UpperCAmelCase , streamer=__UpperCAmelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_A = cs.out[:-1] # Remove the final "\n"
_A = tokenizer(__UpperCAmelCase , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_A = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_A = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__UpperCAmelCase )
_A = -1
_A = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__UpperCAmelCase )
_A = TextIteratorStreamer(__UpperCAmelCase , timeout=0.001 )
_A = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_A = Thread(target=model.generate , kwargs=__UpperCAmelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__UpperCAmelCase ):
_A = ""
for new_text in streamer:
streamer_text += new_text
| 79 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_autoformer": [
"AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AutoformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"AutoformerForPrediction",
"AutoformerModel",
"AutoformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 252 | 0 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case__ (A__ , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase :Optional[int] = KandinskyVaaPriorPipeline
__lowerCAmelCase :List[Any] = ["prompt"]
__lowerCAmelCase :int = ["prompt", "negative_prompt"]
__lowerCAmelCase :Tuple = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
__lowerCAmelCase :List[str] = False
@property
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
return 3_2
@property
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
return 3_2
@property
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
return 1_0_0
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents
        # would always return 0 - set clip_std to 1 so it won't
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
| 366 |
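One detail worth pulling out of the pipeline test above: get_dummy_inputs builds its generator differently on mps, because torch.Generator(device="mps") has historically not been supported the way CPU/CUDA generators are. A small self-contained sketch of that pattern (the device strings are illustrative):

import torch


def make_generator(device: str, seed: int = 0) -> torch.Generator:
    # mps lacks per-device generator support, so fall back to seeding the
    # global RNG, which returns the default generator
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)


sample = torch.randn(2, 2, generator=make_generator("cpu"))
print(sample)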
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
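
# Illustrative usage (the variable names below are hypothetical): strtobool
# accepts spellings such as "yes"/"no", "true"/"false", "1"/"0" and returns
# 1 or 0, so the parsed flag is truthy/falsy rather than a strict bool.
#
#   os.environ["MY_DEBUG_FLAG"] = "yes"
#   assert parse_flag_from_env("MY_DEBUG_FLAG") == 1
#   assert parse_flag_from_env("SOME_UNSET_FLAG", default=False) is False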
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """A TestCase that keeps one temporary directory for the whole class and empties it before each test."""

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    """A TestCase that resets the accelerator state singletons after each test."""

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    """A TestCase designed to dynamically register mocks that should be started for every test."""

    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result
class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 266 | 0 |
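A hedged usage sketch for the helpers in the row above, assuming they are importable as accelerate.test_utils.testing (as in the upstream package); the test body and class name are illustrative:

import unittest

from accelerate.test_utils.testing import execute_subprocess_async, require_cuda, slow


class LaunchSmokeTest(unittest.TestCase):
    @slow          # runs only when RUN_SLOW=yes (or true/1) is exported
    @require_cuda  # skipped unless a GPU is visible
    def test_cuda_visible_in_subprocess(self):
        # streams the child's stdout/stderr live and raises RuntimeError on a
        # non-zero return code
        execute_subprocess_async(
            ["python", "-c", "import torch; assert torch.cuda.is_available()"]
        )


if __name__ == "__main__":
    unittest.main()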