| code (string, length 86–54.5k) | code_codestyle (int64, 0–371) | style_context (string, length 87–49.2k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
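Each row below pairs a `code` sample with a `style_context` sample, their codestyle ids, and a binary `label` (which appears to mark whether the two samples share a code style). As a minimal sketch of consuming rows with this schema, assuming they are published as a Hugging Face dataset (the dataset id below is hypothetical):

from datasets import load_dataset

# Hypothetical dataset id; the dump does not name its source repository.
ds = load_dataset("some-org/code-style-pairs", split="train")

row = ds[0]
print(len(row["code"]), row["code_codestyle"], row["style_context_codestyle"], row["label"])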
def mf_knapsack(i: int, wt: list, val: list, j: int):
    """Memory-function (memoized) knapsack: fills the global table f[i][j] on demand."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]


def knapsack(w: int, wt: list, val: list, n: int):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full dp table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    """Solves the knapsack and also reconstructs one optimal subset of item indices."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j),
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| code_codestyle: 216 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
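A minimal usage sketch for the image processor above, assuming Pillow and the `transformers` image utilities are installed; the random input image is arbitrary:

import numpy as np

# Run the processor with its defaults on one random HWC uint8 image.
processor = ConvNextImageProcessor()
dummy_image = (np.random.rand(384, 384, 3) * 255).astype(np.uint8)
batch = processor.preprocess(dummy_image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 384, 384)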
| style_context_codestyle: 216 | label: 1 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
    import PIL.Image

    from .features import FeatureType


_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTYPES = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class Image:
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode an example into a format suitable for Arrow."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode an example image file into image data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow array."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise PNG/TIFF compression."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTYPES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTYPES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTYPES}"
        )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
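A small sketch of how the `Image` feature above round-trips an array, assuming `datasets` and Pillow are installed:

import numpy as np

# Encode a tiny grayscale array into {"bytes", "path"} storage, then decode it back.
feature = Image()
encoded = feature.encode_example(np.zeros((4, 4), dtype=np.uint8))
decoded = feature.decode_example(encoded)
print(type(decoded).__name__, decoded.size)  # Image (4, 4)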
| code_codestyle: 363 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| style_context_codestyle: 232 | label: 0 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
    raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """
    Copy/paste/tweak the fairseq RoBERTa weights into our XLM-RoBERTa-XL structure.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase__ :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
lowercase__ :Optional[int] = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
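A typical invocation of the conversion script might look like the following; the script filename and both paths are hypothetical:

python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
    --roberta_checkpoint_path ./xlm-roberta-xl-fairseq \
    --pytorch_dump_folder_path ./xlm-roberta-xl-hf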
| code_codestyle: 101 |
def factorial(num: int) -> int:
    """Compute num! iteratively."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split the digits of number and add them."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Returns the sum of the digits in the number num!"""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
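A quick sanity check of the two helpers composed by `solution` (10! = 3628800, whose digits sum to 27):

assert factorial(10) == 3628800
assert split_and_add(3628800) == 27  # 3 + 6 + 2 + 8 + 8 + 0 + 0
assert solution(10) == 27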
| style_context_codestyle: 82 | label: 0 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of two `ArgumentParser` instances.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
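A minimal sketch of the parser these tests exercise, using the `BasicExample` dataclass defined above; the command-line values are arbitrary:

if __name__ == "__main__":
    parser = HfArgumentParser(BasicExample)
    (example,) = parser.parse_args_into_dataclasses(
        ["--foo", "1", "--bar", "0.5", "--baz", "quux", "--flag", "true"]
    )
    print(example)  # BasicExample(foo=1, bar=0.5, baz='quux', flag=True)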
| code_codestyle: 361 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
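A minimal sketch of what the lazy pattern above buys: importing the package is cheap, and the heavy submodule is only imported on first attribute access (assuming this is the `transformers` AST subpackage):

# Importing the package does not yet import the PyTorch modeling code.
from transformers.models import audio_spectrogram_transformer as ast_pkg

# First attribute access makes _LazyModule import the real submodule.
print(ast_pkg.ASTConfig.__name__)  # "ASTConfig"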
| style_context_codestyle: 341 | label: 0 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def __a ( self ) -> List[Any]:
'''simple docstring'''
return BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
@slow
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Optional[Any] = 'Hello World!'
snake_case__ : Dict = [65, 18536, 2260, 101, 66]
self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) )
@slow
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Union[str, Any] = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
# fmt: off
snake_case__ : Optional[int] = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) )
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )
        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
snake_case__ : int = {'input_ids': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = snake_case__  # the encoding dict defined just above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
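# The integration tests above pin exact ids for the `google/bigbird-roberta-base`
# checkpoint. As a hedged standalone sketch (assuming network access to download the
# tokenizer), the round-trip they exercise looks like this:
#
#     from transformers import BigBirdTokenizer
#     tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#     ids = tokenizer.encode("Hello World!")   # [65, 18536, 2260, 101, 66] per the test above
#     tokenizer.decode(ids)                    # "[CLS] Hello World![SEP]" (65/66 are [CLS]/[SEP])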
| 143
|
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFDistilBertModel,
"""fill-mask""": TFDistilBertForMaskedLM,
"""question-answering""": TFDistilBertForQuestionAnswering,
"""text-classification""": TFDistilBertForSequenceClassification,
"""token-classification""": TFDistilBertForTokenClassification,
"""zero-shot""": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
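# A minimal, hedged sketch of what the integration test above checks (assuming TensorFlow
# is installed and the `distilbert-base-uncased` weights can be downloaded): the base
# model maps a (batch, seq_len) tensor of token ids to (batch, seq_len, 768) hidden states.
#
#     import tensorflow as tf
#     from transformers import TFDistilBertModel
#     model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
#     hidden_states = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]  # shape (1, 6, 768)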
| 143
| 1
|
"""simple docstring"""
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]
def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    # note: the obfuscated source lost the concrete distributed/fp16 flags; the standard
    # fp32/fp16 x single/multi-gpu matrix is restored here
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)
    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we can check for now is that the process didn't fail
        pass

    # XXX: need to do better validation beyond just that the run was successful
    def run_and_check(self, stage, model, eval_steps=10, distributed=True, fp16=True, quality_checks=True):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir
    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
__lowerCAmelCase = f"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
""".split()
        if fp16:
            args.extend(["--fp16"])
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
        return output_dir

    def get_launcher(self, distributed=False):
        # explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup,
        # and cap at 2 gpus for the distributed case
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
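# For orientation, a hedged sketch of the command `run_trainer` assembles (paths and the
# GPU count are illustrative, not taken from a real run):
#
#     deepspeed --num_nodes 1 --num_gpus 2 \
#         <examples_dir>/research_projects/wav2vec2/run_asr.py \
#         --model_name_or_path patrickvonplaten/wav2vec2_tiny_random \
#         ... \
#         --deepspeed <test_dir>/ds_config_wav2vec2_zero2.json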
| 353
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
UpperCamelCase__ = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics, including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line, with all the annotations for this word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
UpperCamelCase__ = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
    Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation).
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
    Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation).
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        # the gold (key) parse annotation is used for both key and system mentions
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )
    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})
        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})
    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
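# A tiny illustration of the gold-parse check above, reusing the CoNLL sample line from
# the metric docstring (column 6, i.e. line.split()[5], holds the parse bit):
#
#     line = "bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -"
#     check_gold_parse_annotation([line])  # True, because line.split()[5] != "-"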
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Sequence(datasets.Value("string" ) ),
} ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[
"https://github.com/ns-moosavi/coval",
"https://www.aclweb.org/anthology/P16-1060",
"http://www.conll.cemantix.org/2012/data.html",
] , )
    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )
        return score
| 102
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
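# A short offline sketch of the derived attributes (no model weights involved): with the
# defaults above, `hidden_size` is embed_dim * 2 ** (num_stages - 1).
#
#     config = NatConfig()
#     assert config.num_layers == 4                     # len([3, 4, 6, 5])
#     assert config.hidden_size == 64 * 2**3            # 512
#     assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]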
| 25
|
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE is already in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))
        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]
        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)
    return all_relative_imports
def check_imports(filename):
    # Check if the current Python environment contains all the libraries that are imported in a file.
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]
    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)
    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )
    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    # Import a module from the dynamic modules cache and extract a class from it.
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    # Retrieve the pipeline class that inherits from `DiffusionPipeline`; there has to be exactly one.
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    # Download and cache `module_file` from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    # And lastly we get the class inside our newly created module
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
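# Hedged usage sketch: loading a community pipeline class by name. The pipeline name is
# illustrative; it must exist locally, on the Hub, or in diffusers' community folder on
# GitHub for the download to succeed.
#
#     pipeline_class = get_class_from_dynamic_module(
#         "clip_guided_stable_diffusion", module_file="clip_guided_stable_diffusion.py"
#     )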
| 266
| 0
|
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        # Number of forward references from this node
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__(self) -> str:
        items = list(self)
        if len(items) == 0:
            return f"SkipList(level={self.level})"
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward
        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)
    def __iter__(self):
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]
    def random_level(self) -> int:
        # Random level from [1, self.max_level]; higher values are geometrically less likely.
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
    def _locate_node(self, key):
        # Returns the searched node (or None if the key is absent) and the list of
        # nodes that refer (or should refer) to it.
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]
    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node
    def find(self, key: VT):
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
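# A quick, hedged empirical check of `random_level` (probabilistic, so the bound is loose
# and may occasionally fail): with p = 0.5 the expected level is about 1 / (1 - p) = 2.
#
#     levels = [SkipList().random_level() for _ in range(10_000)]
#     assert 1.8 < sum(levels) / len(levels) < 2.2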
def test_insert():
    skip_list = SkipList()
skip_list.insert("Key1" , 3 )
skip_list.insert("Key2" , 12 )
skip_list.insert("Key3" , 41 )
skip_list.insert("Key4" , -19 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def test_insert_overrides_existing_value():
    skip_list = SkipList()
skip_list.insert("Key1" , 10 )
skip_list.insert("Key1" , 12 )
skip_list.insert("Key5" , 7 )
skip_list.insert("Key7" , 10 )
skip_list.insert("Key10" , 5 )
skip_list.insert("Key7" , 7 )
skip_list.insert("Key5" , 5 )
skip_list.insert("Key10" , 10 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none():
    skip_list = SkipList()
assert skip_list.find("Some key" ) is None
def test_search():
    skip_list = SkipList()
skip_list.insert("Key2" , 20 )
assert skip_list.find("Key2" ) == 20
skip_list.insert("Some Key" , 10 )
skip_list.insert("Key2" , 8 )
skip_list.insert("V" , 13 )
assert skip_list.find("Y" ) is None
assert skip_list.find("Key2" ) == 8
assert skip_list.find("Some Key" ) == 10
assert skip_list.find("V" ) == 13
def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
skip_list.delete("Some key" )
assert len(skip_list.head.forward ) == 0
def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()
skip_list.insert("Key1" , 12 )
skip_list.insert("V" , 13 )
skip_list.insert("X" , 14 )
skip_list.insert("Key2" , 15 )
skip_list.delete("V" )
skip_list.delete("Key2" )
assert skip_list.find("V" ) is None
assert skip_list.find("Key2" ) is None
def test_delete_removes_only_given_key():
    skip_list = SkipList()
skip_list.insert("Key1" , 12 )
skip_list.insert("V" , 13 )
skip_list.insert("X" , 14 )
skip_list.insert("Key2" , 15 )
skip_list.delete("V" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) == 14
assert skip_list.find("Key1" ) == 12
assert skip_list.find("Key2" ) == 15
skip_list.delete("X" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) == 12
assert skip_list.find("Key2" ) == 15
skip_list.delete("Key1" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) is None
assert skip_list.find("Key2" ) == 15
skip_list.delete("Key2" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) is None
assert skip_list.find("Key2" ) is None
def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()
skip_list.insert("Key1" , 12 )
skip_list.insert("V" , 13 )
skip_list.insert("X" , 142 )
skip_list.insert("Key2" , 15 )
skip_list.delete("X" )
    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4
def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
    assert is_sorted(list(skip_list))
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
    assert is_sorted(list(skip_list))
def pytests():
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def main():
    """
    >>> pytests()
    """
    skip_list = SkipList()
skip_list.insert(2 , "2" )
skip_list.insert(4 , "4" )
skip_list.insert(6 , "4" )
skip_list.insert(4 , "5" )
skip_list.insert(8 , "4" )
skip_list.insert(9 , "4" )
skip_list.delete(4 )
    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 57
|
"""simple docstring"""
import numpy as np
import datasets
_DESCRIPTION = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
_CITATION = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
_KWARGS_DESCRIPTION = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )
    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)
        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
| 57
| 1
|
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    # images: the generated images; nsfw_content_detected: per-image safety-checker flags, if run
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 337
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
def __lowerCamelCase ( self ):
return self.encoder_attention_heads
@property
def __lowerCamelCase ( self ):
return self.d_model
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase : Any = self.backbone_config.to_dict()
lowercase : List[str] = self.__class__.model_type
return output
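# Usage sketch (added illustration, user-side code; assumes an installed
# `transformers` so the absolute import works):
#
#     from transformers import DetaConfig
#
#     config = DetaConfig()                   # default ResNet backbone
#     assert config.hidden_size == config.d_model == 256
#     config_dict = config.to_dict()          # backbone_config is serialized recursively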
| 337
| 1
|
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """
    Removes segments. Positive values shave the first segments, negative shave the last segments.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside resnets to the new naming scheme (local renaming).
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside attentions to the new naming scheme (local renaming).
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    """
    Applies the global renaming to locally converted paths, splits fused qkv attention
    tensors when requested, and writes the tensors into `checkpoint`.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    """
    Takes the original LDM state dict and returns a dict with diffusers UNet key names.
    """
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
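# Usage sketch (added illustration; the script filename and paths are hypothetical,
# only the three flags above are defined by this file):
#
#     python convert_ldm_checkpoint.py \
#         --checkpoint_path ./ldm/model.ckpt \
#         --config_file ./ldm/config.json \
#         --dump_path ./converted
#
# or programmatically, reusing the function defined above:
#
#     with open("./ldm/config.json") as f:
#         config = json.loads(f.read())
#     new_state_dict = convert_ldm_checkpoint(torch.load("./ldm/model.ckpt"), config)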
| 369
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
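# Design note: each try/except above registers an optional submodule in
# `_import_structure` only when its backend (tokenizers, torch) is importable;
# `_LazyModule` then defers the real imports until an attribute is first accessed.
# Usage sketch (added illustration, user-side code):
#
#     from transformers.models.squeezebert import SqueezeBertConfig
#
#     config = SqueezeBertConfig()  # first access triggers the lazy import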
| 1
| 0
|
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for the NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in the CoNLL-2003 dataset, the chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
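# Usage sketch (added illustration; the data directory is hypothetical): reading
# CoNLL-style examples with the NER task defined above.
#
#     task = NER()  # label taken from the last column of each line
#     examples = task.read_examples_from_file("./data", "train")  # expects ./data/train.txt
#     print(task.get_labels(None))  # falls back to the CoNLL-2003 label set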
| 343
|
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """
    Sorts the left half and right half individually, then merges them into `input_list`.
    """
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """
    Returns a sorted copy of the input list, merging runs of doubling size.
    """
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
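def _self_check() -> None:
    """Small self-check (added illustration, not in the original file)."""
    assert iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) == [1, 2, 5, 7, 7, 8, 9]
    assert iter_merge_sort([1]) == [1]
    assert iter_merge_sort([]) == []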
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
| 343
| 1
|
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        # binarize the mask
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
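# Usage sketch (added illustration, user-side code; the checkpoint id is the one
# used in the RePaint documentation, the image/mask files are hypothetical):
#
#     from diffusers import RePaintPipeline, RePaintScheduler
#     from PIL import Image
#
#     scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#     pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#     original = Image.open("face_256.png").resize((256, 256))
#     mask = Image.open("mask_256.png").resize((256, 256))
#     out = pipe(image=original, mask_image=mask, num_inference_steps=250).images[0]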
| 364
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    """
    A "fast" ELECTRA tokenizer (backed by HuggingFace tokenizers), able to load
    the checkpoints mapped above.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
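# Usage sketch (added illustration, user-side code; the checkpoint id is one of
# the entries mapped above):
#
#     tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#     enc = tok("electra replaced token detection", return_token_type_ids=True)
#     # enc["input_ids"] is [CLS] ... [SEP]; token_type_ids are all 0 for one segment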
| 217
| 0
|
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """
    Finds a root of `function` with the secant method, starting from x0 and x1.
    """
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
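# Usage sketch (added illustration): the secant iteration above converges for any
# smooth function whose two starting points do not share a function value, e.g.
#
#     intersection(lambda x: x**2 - 2, 1.0, 2.0)  # ~1.41421, i.e. sqrt(2)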
| 175
|
'''simple docstring'''
def add(first: int, second: int) -> int:
    """
    Addition of two integers using only bitwise operations.

    >>> add(3, 5)
    8
    >>> add(-7, 2)
    -5
    """
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
print(f'{add(first, second) = }')
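# Worked example (added illustration): add(5, 3)
#   iter 1: c = 5 & 3 = 1; first = 5 ^ 3 = 6; second = 1 << 1 = 2
#   iter 2: c = 6 & 2 = 2; first = 6 ^ 2 = 4; second = 2 << 1 = 4
#   iter 3: c = 4 & 4 = 4; first = 4 ^ 4 = 0; second = 4 << 1 = 8
#   iter 4: c = 0 & 8 = 0; first = 0 ^ 8 = 8; second = 0 -> loop ends, returns 8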
| 67
| 0
|
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    """
    A training module for NER. See BaseTransformer for the core options.
    """

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features."
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size
        )

    def validation_step(self, batch, batch_nb):
        "Compute validation."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        "Evaluation called for both Val and Test."
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
| 370
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    """
    Ensure FeaturesManager.determine_framework prefers, in order: the explicit
    argument, the contents of a local checkpoint, then the available backends.
    """

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_path):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_path)

    def _setup_tf_ckpt(self, save_path):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_path)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
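# Usage sketch (added illustration): outside the test, the priority implemented by
# `FeaturesManager.determine_framework` is: explicit `framework` argument, then the
# contents of a local checkpoint, then the installed backends (PyTorch preferred
# when both are available).
#
#     from transformers.onnx import FeaturesManager
#
#     framework = FeaturesManager.determine_framework("bert-base-uncased")  # "pt" or "tf"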
| 286
| 0
|
def add(first: int, second: int) -> int:
    """
    Addition of two integers using only bitwise operations.

    >>> add(3, 5)
    8
    >>> add(-7, 2)
    -5
    """
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
print(F"""{add(first, second) = }""")
| 333
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    """
    A "fast" BLOOM tokenizer (backed by HuggingFace tokenizers); it has no slow
    counterpart, so `slow_tokenizer_class` stays None.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
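# Usage sketch (added illustration, user-side code; the checkpoint id is one of
# the entries mapped above):
#
#     tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#     ids = tok("Hello world").input_ids
#     tok.decode(ids)  # round-trips to "Hello world"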
| 333
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
# reconstructed module header (an assumption: an assignment of False fits the
# usual diffusers test preamble disabling TF32 matmuls for reproducible slices)
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 365
|
"""simple docstring"""
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """
    Copy/paste/tweak an s3prl checkpoint's weights into the transformers design.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
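# Usage sketch (added illustration; the script filename and paths are hypothetical):
#
#     python convert_s3prl_checkpoint.py \
#         --base_model_name facebook/wav2vec2-base \
#         --config_path ./config.json \
#         --checkpoint_path ./s3prl_best.ckpt \
#         --model_dump_path ./converted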
| 234
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self : str , __a : Optional[int]=0 ):
_a = torch.manual_seed(__a )
_a = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def UpperCamelCase__ ( self : str ):
_a = "stabilityai/stable-diffusion-2-base"
_a = DDIMScheduler.from_pretrained(__a , subfolder="scheduler" )
_a = StableDiffusionPanoramaPipeline.from_pretrained(__a , scheduler=__a , safety_checker=__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
_a = self.get_inputs()
_a = pipe(**__a ).images
_a = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
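        # (the panorama pipeline defaults to a 512x2048 canvas, hence the 4:1
        # aspect ratio asserted above)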
_a = np.array(
[
0.36968392,
0.27025372,
0.32446766,
0.28379387,
0.36363274,
0.30733347,
0.27100027,
0.27054125,
0.25536096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def UpperCamelCase__ ( self : Any ):
_a = StableDiffusionPanoramaPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base" , safety_checker=__a )
_a = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
_a = self.get_inputs()
_a = pipe(**__a ).images
_a = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
_a = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def UpperCamelCase__ ( self : Optional[int] ):
_a = 0
def callback_fn(__a : int , __a : int , __a : torch.FloatTensor ) -> None:
_a = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_a = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
_a = latents[0, -3:, -3:, -1]
_a = np.array(
[
0.18681869,
0.33907816,
0.5361276,
0.14432865,
-0.02856611,
-0.73941123,
0.23397987,
0.47322682,
-0.37823164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
_a = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
_a = latents[0, -3:, -3:, -1]
_a = np.array(
[
0.18539645,
0.33987248,
0.5378559,
0.14437142,
-0.02455261,
-0.7338317,
0.23990755,
0.47356272,
-0.3786505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
_a = False
_a = "stabilityai/stable-diffusion-2-base"
_a = DDIMScheduler.from_pretrained(__a , subfolder="scheduler" )
_a = StableDiffusionPanoramaPipeline.from_pretrained(__a , scheduler=__a , safety_checker=__a )
_a = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
_a = self.get_inputs()
pipe(**__a , callback=__a , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def UpperCamelCase__ ( self : Tuple ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_a = "stabilityai/stable-diffusion-2-base"
_a = DDIMScheduler.from_pretrained(__a , subfolder="scheduler" )
_a = StableDiffusionPanoramaPipeline.from_pretrained(__a , scheduler=__a , safety_checker=__a )
_a = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_a = self.get_inputs()
_a = pipe(**__a )
_a = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 63
|
"""simple docstring"""
def __lowerCAmelCase ( lowercase : list[int] ) -> float:
"""simple docstring"""
    if not lowercase : # Makes sure that the list is not empty
        raise ValueError("List is empty" )
    average = sum(lowercase ) / len(lowercase ) # Calculate the average
    return sum(abs(x - average ) for x in lowercase ) / len(lowercase )
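# Worked example (illustrative): for [1, 2, 3, 4] the average is 2.5, the absolute
# deviations are 1.5, 0.5, 0.5 and 1.5, and their mean is 4.0 / 4 = 1.0.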
if __name__ == "__main__":
import doctest
doctest.testmod()
| 203
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
__A = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
__A = TaTokenizerFast
__A = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
__A = _LazyModule(
__name__,
globals()['''__file__'''],
_import_structure,
extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
module_spec=__spec__,
)
| 365
|
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def lowercase_ ( _lowerCamelCase: str , _lowerCamelCase: Optional[int]=7 ) -> int:
'''simple docstring'''
__lowerCamelCase : List[str] = None
if token is not None:
__lowerCamelCase : List[Any] = {"Accept": "application/vnd.github+json", "Authorization": F"""Bearer {token}"""}
# The id of a workflow (not of a workflow run)
__lowerCamelCase : Optional[Any] = "636036"
__lowerCamelCase : Dict = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
__lowerCamelCase : List[str] = requests.get(_lowerCamelCase , headers=_lowerCamelCase ).json()
return result["workflow_runs"]
def lowercase_ ( _lowerCamelCase: Tuple ) -> int:
'''simple docstring'''
__lowerCamelCase : List[Any] = get_daily_ci_runs(_lowerCamelCase )
__lowerCamelCase : Optional[Any] = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
__lowerCamelCase : Optional[int] = workflow_run["id"]
break
return workflow_run_id
def lowercase_ ( _lowerCamelCase: List[Any] , _lowerCamelCase: int , _lowerCamelCase: str ) -> Any:
'''simple docstring'''
__lowerCamelCase : Any = get_last_daily_ci_runs(_lowerCamelCase )
if workflow_run_id is not None:
__lowerCamelCase : Dict = get_artifacts_links(worflow_run_id=_lowerCamelCase , token=_lowerCamelCase )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
__lowerCamelCase : int = artifacts_links[artifact_name]
download_artifact(
artifact_name=_lowerCamelCase , artifact_url=_lowerCamelCase , output_dir=_lowerCamelCase , token=_lowerCamelCase )
def lowercase_ ( _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Dict , _lowerCamelCase: int ) -> Any:
'''simple docstring'''
get_last_daily_ci_artifacts(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__lowerCamelCase : int = {}
for artifact_name in artifact_names:
__lowerCamelCase : Tuple = os.path.join(_lowerCamelCase , F"""{artifact_name}.zip""" )
if os.path.isfile(_lowerCamelCase ):
__lowerCamelCase : Optional[int] = {}
with zipfile.ZipFile(_lowerCamelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(_lowerCamelCase ):
# read the file
with z.open(_lowerCamelCase ) as f:
__lowerCamelCase : Tuple = f.read().decode("UTF-8" )
return results
| 64
| 0
|
'''simple docstring'''
import requests
def lowerCamelCase__ ( _A , _A ):
a : List[Any] = {'''Content-Type''': '''application/json'''}
a : Optional[int] = requests.post(__UpperCamelCase , json={'text': message_body} , headers=__UpperCamelCase )
if response.status_code != 200:
a : int = (
'''Request to slack returned an error '''
f"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(__UpperCamelCase )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 297
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCAmelCase_ ( snake_case ):
@staticmethod
@abstractmethod
def _lowerCamelCase ( UpperCamelCase_ ) -> Union[str, Any]:
raise NotImplementedError()
@abstractmethod
def _lowerCamelCase ( self ) -> str:
raise NotImplementedError()
| 249
| 0
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. Compare this with taking a full-size model and reducing its layers and
# emb dimensions to the minimum, while keeping the full vocab + merges files, which leads to
# ~3MB in total for all files. The latter approach is done by `fsmt-make-tiny-model.py`.
#
# It will then be used as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
__UpperCAmelCase = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
__UpperCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__UpperCAmelCase = dict(zip(vocab, range(len(vocab))))
__UpperCAmelCase = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase = Path(tmpdirname)
__UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
__UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
__UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
__UpperCAmelCase = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
__UpperCAmelCase = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=10_00,
tgt_vocab_size=10_00,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
__UpperCAmelCase = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
__UpperCAmelCase = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
__UpperCAmelCase = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 368
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class lowerCamelCase__ ( _a ):
_lowerCAmelCase = 42
class lowerCamelCase__ ( _a , _a ):
@register_to_config
def __init__( self : Union[str, Any] , _a : int = 1_6 , _a : int = 8_8 , _a : Optional[int] = None , _a : Optional[int] = None , _a : int = 1 , _a : float = 0.0 , _a : int = 3_2 , _a : Optional[int] = None , _a : bool = False , _a : Optional[int] = None , _a : str = "geglu" , _a : bool = True , _a : bool = True , ):
super().__init__()
a__: List[Any] =num_attention_heads
a__: Tuple =attention_head_dim
a__: Dict =num_attention_heads * attention_head_dim
a__: List[Any] =in_channels
a__: Dict =torch.nn.GroupNorm(num_groups=_a , num_channels=_a , eps=1e-6 , affine=_a )
a__: str =nn.Linear(_a , _a )
# 3. Define transformers blocks
a__: Optional[int] =nn.ModuleList(
[
BasicTransformerBlock(
_a , _a , _a , dropout=_a , cross_attention_dim=_a , activation_fn=_a , attention_bias=_a , double_self_attention=_a , norm_elementwise_affine=_a , )
for d in range(_a )
] )
a__: Any =nn.Linear(_a , _a )
def _lowerCamelCase ( self : List[str] , _a : str , _a : Optional[Any]=None , _a : int=None , _a : int=None , _a : Optional[int]=1 , _a : Tuple=None , _a : bool = True , ):
a__ , a__ , a__ , a__: int =hidden_states.shape
a__: str =batch_frames // num_frames
a__: Any =hidden_states
a__: Optional[int] =hidden_states[None, :].reshape(_a , _a , _a , _a , _a )
a__: Union[str, Any] =hidden_states.permute(0 , 2 , 1 , 3 , 4 )
a__: Tuple =self.norm(_a )
a__: Union[str, Any] =hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , _a , _a )
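        # the permute + reshape above folds the spatial grid into the batch axis,
        # so each transformer block below attends along the frame dimension only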
a__: Dict =self.proj_in(_a )
# 2. Blocks
for block in self.transformer_blocks:
a__: str =block(
_a , encoder_hidden_states=_a , timestep=_a , cross_attention_kwargs=_a , class_labels=_a , )
# 3. Output
a__: Any =self.proj_out(_a )
a__: Optional[int] =(
hidden_states[None, None, :]
.reshape(_a , _a , _a , _a , _a )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
a__: Dict =hidden_states.reshape(_a , _a , _a , _a )
a__: List[str] =hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=_a )
| 42
| 0
|
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"vocab_file": "spiece.model"}
SCREAMING_SNAKE_CASE__ = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
SCREAMING_SNAKE_CASE__ = {
"AI-Sweden/gpt-sw3-126m": 2_048,
"AI-Sweden/gpt-sw3-350m": 2_048,
"AI-Sweden/gpt-sw3-1.6b": 2_048,
"AI-Sweden/gpt-sw3-6.7b": 2_048,
"AI-Sweden/gpt-sw3-20b": 2_048,
}
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
def __init__( self , lowercase , lowercase=False , lowercase=False , lowercase=False , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase = None , **lowercase , ) -> None:
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCAmelCase = kwargs.get("""name_or_path""" )
if name_or_path is None:
logger.warning(
"""name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
""" you are testing the model, this can safely be ignored""" )
lowerCAmelCase = """None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
lowerCAmelCase = """<|endoftext|>""" if eos_token is None else eos_token
lowerCAmelCase = """<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
lowerCAmelCase = unk_token if pad_token is None else pad_token
lowerCAmelCase = eos_token if bos_token is None else bos_token
else:
lowerCAmelCase = """<pad>""" if pad_token is None else pad_token
lowerCAmelCase = """<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=lowercase , remove_space=lowercase , keep_accents=lowercase , bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , pad_token=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
lowerCAmelCase = do_lower_case
lowerCAmelCase = remove_space
lowerCAmelCase = keep_accents
lowerCAmelCase = vocab_file
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase )
# Used for whitespace normalization in input texts
        # fmt: off
lowerCAmelCase = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
lowerCAmelCase = re.compile(
f'[{"".join(map(lowercase , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]' )
def __getstate__( self ) -> Optional[int]:
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , lowercase ) -> str:
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def _snake_case ( self ) -> int:
return len(self.sp_model )
def _snake_case ( self , lowercase ) -> str:
lowerCAmelCase = self.non_printing_characters_re.sub("""""" , lowercase )
# Normalize whitespaces
lowerCAmelCase = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
lowerCAmelCase = unicodedata.normalize("""NFC""" , lowercase )
return text
def _snake_case ( self , lowercase , **lowercase ) -> List[str]:
lowerCAmelCase = self.preprocess_text(lowercase )
return self.sp_model.encode(lowercase , out_type=lowercase )
def _snake_case ( self , lowercase ) -> int:
return self.sp_model.PieceToId(lowercase )
def _snake_case ( self , lowercase ) -> str:
return self.sp_model.IdToPiece(lowercase )
@staticmethod
def _snake_case ( lowercase ) -> str:
return out_string
def _snake_case ( self , lowercase ) -> str:
lowerCAmelCase = []
lowerCAmelCase = """"""
lowerCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase ) + token
lowerCAmelCase = True
lowerCAmelCase = []
else:
current_sub_tokens.append(lowercase )
lowerCAmelCase = False
out_string += self.sp_model.decode(lowercase )
return out_string
def _snake_case ( self ) -> Dict[str, int]:
lowerCAmelCase = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , lowercase , lowercase = None ) -> Tuple[str]:
if not os.path.isdir(lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase = os.path.join(
lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase , """wb""" ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowercase )
return (out_vocab_file,)
def _snake_case ( self , lowercase , lowercase = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(lowercase , lowercase ):
lowerCAmelCase = self.preprocess_text(lowercase )
lowerCAmelCase = self.sp_model.encode(lowercase )
else:
lowerCAmelCase = [self.preprocess_text(lowercase ) for t in text]
lowerCAmelCase = self.sp_model.encode(lowercase )
if return_tensors is True or return_tensors == "pt":
lowerCAmelCase = torch.tensor(lowercase )
return token_ids
def _snake_case ( self , lowercase ) -> str:
return self.sp_model.decode(lowercase )
def _snake_case ( self , lowercase ) -> List[int]:
lowerCAmelCase = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
lowerCAmelCase = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(lowercase ) + f'{self.bos_token}Bot:'
)
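        # e.g. with the default special tokens defined above this yields
        # "<|endoftext|><s>User: Hi<s>Bot: Hello<s>Bot:" (illustrative turn texts)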
return self.encode(text=lowercase )
| 46
|
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
a =open # noqa: we just need to have a builtin inside this module to test it properly
| 73
| 0
|
"""simple docstring"""
import inspect
import unittest
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self):
try:
import diffusers # noqa: F401
except ImportError:
assert False
def snake_case_ ( self):
import diffusers
from diffusers.dependency_versions_table import deps
        __SCREAMING_SNAKE_CASE = inspect.getmembers(diffusers , inspect.isclass)
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
__SCREAMING_SNAKE_CASE = """k-diffusion"""
elif backend == "invisible_watermark":
__SCREAMING_SNAKE_CASE = """invisible-watermark"""
assert backend in deps, f"{backend} is not in the deps table!"
| 365
|
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__magic_name__ = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
if got_ver is None or want_ver is None:
raise ValueError(
f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
f" reinstalling {pkg}." )
if not ops[op](version.parse(UpperCamelCase_ ) , version.parse(UpperCamelCase_ ) ):
raise ImportError(
f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" )
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ = None ):
__SCREAMING_SNAKE_CASE = f"\n{hint}" if hint is not None else """"""
# non-versioned check
if re.match(r"""^[\w_\-\d]+$""" , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = requirement, None, None
else:
__SCREAMING_SNAKE_CASE = re.findall(r"""^([^!=<>\s]+)([\s!=<>]{1,2}.+)""" , UpperCamelCase_ )
if not match:
raise ValueError(
"""requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"""
f" got {requirement}" )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = match[0]
__SCREAMING_SNAKE_CASE = want_full.split(""",""" ) # there could be multiple requirements
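        # e.g. "package_a>=1.23,<2.0" splits into [">=1.23", "<2.0"] here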
__SCREAMING_SNAKE_CASE = {}
for w in want_range:
__SCREAMING_SNAKE_CASE = re.findall(r"""^([\s!=<>]{1,2})(.+)""" , UpperCamelCase_ )
if not match:
raise ValueError(
"""requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"""
f" but got {requirement}" )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = match[0]
__SCREAMING_SNAKE_CASE = want_ver
if op not in ops:
raise ValueError(f"{requirement}: need one of {list(ops.keys() )}, but got {op}" )
# special case
if pkg == "python":
__SCREAMING_SNAKE_CASE = """.""".join([str(UpperCamelCase_ ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return
# check if any version is installed
try:
__SCREAMING_SNAKE_CASE = importlib.metadata.version(UpperCamelCase_ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f"The '{requirement}' distribution was not found and is required by this application. {hint}" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = """Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"""
return require_version(UpperCamelCase_ , UpperCamelCase_ )
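# Hedged usage sketch: these calls assume the familiar un-obfuscated
# `require_version` API that this module mirrors (the names are an assumption,
# since everything above is defined under placeholder identifiers):
#
#     require_version("packaging>=20.0")  # raises ImportError if the pin is unmet
#     require_version("python>=3.8")      # special-cased against sys.version_info
#     require_version("numpy")            # presence-only check, no version pin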
| 255
| 0
|
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=128 , A_=32 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ):
'''simple docstring'''
UpperCamelCase : Any = parent
UpperCamelCase : Any = batch_size
UpperCamelCase : Any = seq_length
UpperCamelCase : Union[str, Any] = is_training
UpperCamelCase : Tuple = use_input_mask
UpperCamelCase : List[str] = use_token_type_ids
UpperCamelCase : Any = use_labels
UpperCamelCase : Dict = vocab_size
UpperCamelCase : str = hidden_size
UpperCamelCase : Dict = num_hidden_layers
UpperCamelCase : int = num_attention_heads
UpperCamelCase : List[Any] = intermediate_size
UpperCamelCase : Optional[int] = hidden_act
UpperCamelCase : Tuple = hidden_dropout_prob
UpperCamelCase : str = attention_probs_dropout_prob
UpperCamelCase : Dict = max_position_embeddings
UpperCamelCase : Optional[int] = type_vocab_size
UpperCamelCase : int = type_sequence_label_size
UpperCamelCase : List[Any] = initializer_range
UpperCamelCase : Dict = num_labels
UpperCamelCase : Tuple = num_choices
UpperCamelCase : int = scope
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase : List[str] = None
if self.use_input_mask:
UpperCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase : Optional[Any] = None
if self.use_token_type_ids:
UpperCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase : Tuple = None
UpperCamelCase : List[Any] = None
UpperCamelCase : Tuple = None
if self.use_labels:
UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase( self ):
'''simple docstring'''
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
def __UpperCamelCase( self ):
'''simple docstring'''
        UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase : Dict = True
UpperCamelCase : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : int = NezhaModel(config=__a )
model.to(__a )
model.eval()
UpperCamelCase : Dict = model(__a , attention_mask=__a , token_type_ids=__a )
UpperCamelCase : Dict = model(__a , token_type_ids=__a )
UpperCamelCase : Tuple = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
'''simple docstring'''
UpperCamelCase : Tuple = True
UpperCamelCase : Optional[int] = NezhaModel(__a )
model.to(__a )
model.eval()
UpperCamelCase : str = model(
__a , attention_mask=__a , token_type_ids=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , )
UpperCamelCase : str = model(
__a , attention_mask=__a , token_type_ids=__a , encoder_hidden_states=__a , )
UpperCamelCase : Tuple = model(__a , attention_mask=__a , token_type_ids=__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Any = NezhaForMaskedLM(config=__a )
model.to(__a )
model.eval()
UpperCamelCase : str = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = NezhaForNextSentencePrediction(config=__a )
model.to(__a )
model.eval()
UpperCamelCase : Union[str, Any] = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = NezhaForPreTraining(config=__a )
model.to(__a )
model.eval()
UpperCamelCase : Optional[Any] = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , next_sentence_label=__a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = NezhaForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
UpperCamelCase : Any = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Dict = self.num_labels
UpperCamelCase : Optional[int] = NezhaForSequenceClassification(__a )
model.to(__a )
model.eval()
UpperCamelCase : List[str] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.num_labels
UpperCamelCase : List[Any] = NezhaForTokenClassification(config=__a )
model.to(__a )
model.eval()
UpperCamelCase : Tuple = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.num_choices
UpperCamelCase : List[str] = NezhaForMultipleChoice(config=__a )
model.to(__a )
model.eval()
UpperCamelCase : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : Union[str, Any] = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : str = self.prepare_config_and_inputs()
        UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase : Union[str, Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
_UpperCAmelCase :Any = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCAmelCase :List[Any] = (
{
"""feature-extraction""": NezhaModel,
"""fill-mask""": NezhaForMaskedLM,
"""question-answering""": NezhaForQuestionAnswering,
"""text-classification""": NezhaForSequenceClassification,
"""token-classification""": NezhaForTokenClassification,
"""zero-shot""": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase :int = True
def __UpperCamelCase( self , A_ , A_ , A_=False ):
'''simple docstring'''
UpperCamelCase : List[str] = super()._prepare_for_class(__a , __a , return_labels=__a )
if return_labels:
if model_class in get_values(__a ):
UpperCamelCase : str = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__a )
UpperCamelCase : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__a )
return inputs_dict
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Dict = NezhaModelTester(self )
UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=__a , hidden_size=37 )
def __UpperCamelCase( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__a )
def __UpperCamelCase( self ):
'''simple docstring'''
        UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase : Dict = None
self.model_tester.create_and_check_model_as_decoder(
__a , __a , __a , __a , __a , __a , __a , __a , __a , )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__a )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*__a )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__a )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : str = NezhaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@slow
@require_torch_gpu
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
UpperCamelCase : List[str] = True
UpperCamelCase : List[str] = model_class(config=__a )
UpperCamelCase : int = self._prepare_for_class(__a , __a )
UpperCamelCase : List[Any] = torch.jit.trace(
__a , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__a , os.path.join(__a , "bert.pt" ) )
UpperCamelCase : Optional[int] = torch.jit.load(os.path.join(__a , "bert.pt" ) , map_location=__a )
loaded(inputs_dict["input_ids"].to(__a ) , inputs_dict["attention_mask"].to(__a ) )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" )
UpperCamelCase : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase : str = model(__a , attention_mask=__a )[0]
UpperCamelCase : Optional[Any] = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , __a )
UpperCamelCase : Optional[int] = torch.tensor([[[0.06_85, 0.24_41, 0.11_02], [0.06_00, 0.19_06, 0.13_49], [0.02_21, 0.08_19, 0.05_86]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4 ) )
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" )
UpperCamelCase : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase : int = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase : Union[str, Any] = model(__a , attention_mask=__a )[0]
UpperCamelCase : Union[str, Any] = torch.Size((1, 6, 2_1128) )
self.assertEqual(output.shape , __a )
UpperCamelCase : Tuple = torch.tensor(
[[-2.79_39, -1.79_02, -2.21_89], [-2.85_85, -1.89_08, -2.37_23], [-2.64_99, -1.77_50, -2.25_58]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4 ) )
| 52
|
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __A ( UpperCamelCase__ ):
def __init__(self : int , __a : Distribution , __a : Dict=None , __a : int=None , __a : Any=0 ):
UpperCAmelCase_ = 1.0 if scale is None else scale
UpperCAmelCase_ = 0.0 if loc is None else loc
super().__init__(__a , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__a )] )
@property
def _lowercase (self : Union[str, Any] ):
return self.base_dist.mean * self.scale + self.loc
@property
def _lowercase (self : List[Any] ):
return self.base_dist.variance * self.scale**2
@property
def _lowercase (self : List[Any] ):
return self.variance.sqrt()
class __A ( nn.Module ):
def __init__(self : Optional[int] , __a : int , __a : Dict[str, int] , __a : Callable[..., Tuple[torch.Tensor]] , **__a : List[str] ):
super().__init__(**__a )
UpperCAmelCase_ = args_dim
UpperCAmelCase_ = nn.ModuleList([nn.Linear(__a , __a ) for dim in args_dim.values()] )
UpperCAmelCase_ = domain_map
def _lowercase (self : List[str] , __a : torch.Tensor ):
UpperCAmelCase_ = [proj(__a ) for proj in self.proj]
return self.domain_map(*__a )
class __A ( nn.Module ):
def __init__(self : Union[str, Any] , __a : List[str] ):
super().__init__()
UpperCAmelCase_ = function
def _lowercase (self : Optional[int] , __a : List[str] , *__a : Optional[int] ):
return self.function(__a , *__a )
class __A :
a__ : type
a__ : int
a__ : Dict[str, int]
def __init__(self : List[Any] , __a : int = 1 ):
UpperCAmelCase_ = dim
UpperCAmelCase_ = {k: dim * self.args_dim[k] for k in self.args_dim}
def _lowercase (self : Any , __a : Any ):
if self.dim == 1:
return self.distribution_class(*__a )
else:
return Independent(self.distribution_class(*__a ) , 1 )
def _lowercase (self : List[str] , __a : Union[str, Any] , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , ):
UpperCAmelCase_ = self._base_distribution(__a )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(__a , loc=__a , scale=__a , event_dim=self.event_dim )
@property
def _lowercase (self : Any ):
return () if self.dim == 1 else (self.dim,)
@property
def _lowercase (self : Dict ):
return len(self.event_shape )
@property
def _lowercase (self : Tuple ):
return 0.0
def _lowercase (self : List[str] , __a : int ):
return ParameterProjection(
in_features=__a , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def _lowercase (self : Optional[int] , *__a : torch.Tensor ):
raise NotImplementedError()
@staticmethod
def _lowercase (__a : torch.Tensor ):
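        # squareplus maps any real input smoothly onto (0, inf): it behaves like
        # ReLU for large positive inputs, decays to 0 for large negative ones,
        # and equals 1 at 0 -- a softplus-style positivity transform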
        return (__a + torch.sqrt(torch.square(__a ) + 4.0 )) / 2.0
class __A ( UpperCamelCase__ ):
a__ : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
a__ : type = StudentT
@classmethod
def _lowercase (cls : Union[str, Any] , __a : torch.Tensor , __a : torch.Tensor , __a : torch.Tensor ):
UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps )
UpperCAmelCase_ = 2.0 + cls.squareplus(__a )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __A ( UpperCamelCase__ ):
a__ : Dict[str, int] = {"loc": 1, "scale": 1}
a__ : type = Normal
@classmethod
def _lowercase (cls : Tuple , __a : torch.Tensor , __a : torch.Tensor ):
UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class __A ( UpperCamelCase__ ):
a__ : Dict[str, int] = {"total_count": 1, "logits": 1}
a__ : type = NegativeBinomial
@classmethod
def _lowercase (cls : Optional[Any] , __a : torch.Tensor , __a : torch.Tensor ):
UpperCAmelCase_ = cls.squareplus(__a )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def _lowercase (self : List[str] , __a : str ):
UpperCAmelCase_ , UpperCAmelCase_ = distr_args
if self.dim == 1:
return self.distribution_class(total_count=__a , logits=__a )
else:
return Independent(self.distribution_class(total_count=__a , logits=__a ) , 1 )
def _lowercase (self : Optional[Any] , __a : int , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None ):
UpperCAmelCase_ , UpperCAmelCase_ = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
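            # (the NegativeBinomial mean is total_count * exp(logits), so adding
            # log(scale) to the logits rescales the mean by exactly `scale`)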
return self._base_distribution((total_count, logits) )
| 1
| 0
|
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __a (UpperCamelCase_):
'''simple docstring'''
def __init__( self , *_a , _a=None , _a=None , **_a ) -> Dict:
"""simple docstring"""
super().__init__(*_a , **_a )
SCREAMING_SNAKE_CASE__ : List[Any] = eval_examples
SCREAMING_SNAKE_CASE__ : Any = post_process_function
def _a ( self , _a=None , _a=None , _a=None , _a = "eval" ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.eval_dataset if eval_dataset is None else eval_dataset
SCREAMING_SNAKE_CASE__ : List[str] = self.get_eval_dataloader(_a )
SCREAMING_SNAKE_CASE__ : Dict = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE__ : Dict = self.compute_metrics
SCREAMING_SNAKE_CASE__ : List[Any] = None
SCREAMING_SNAKE_CASE__ : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
SCREAMING_SNAKE_CASE__ : Dict = time.time()
try:
SCREAMING_SNAKE_CASE__ : str = eval_loop(
_a , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_a , metric_key_prefix=_a , )
finally:
SCREAMING_SNAKE_CASE__ : Optional[int] = compute_metrics
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
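            # shifting start_time forward excludes the one-off JIT compilation
            # from the throughput numbers computed by speed_metrics below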
output.metrics.update(
speed_metrics(
_a , _a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.post_process_function(_a , _a , output.predictions )
SCREAMING_SNAKE_CASE__ : Any = self.compute_metrics(_a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
SCREAMING_SNAKE_CASE__ : Any = metrics.pop(_a )
metrics.update(output.metrics )
else:
SCREAMING_SNAKE_CASE__ : Tuple = output.metrics
if self.args.should_log:
            # Only the main node logs the results by default
self.log(_a )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
SCREAMING_SNAKE_CASE__ : str = self.callback_handler.on_evaluate(self.args , self.state , self.control , _a )
return metrics
def _a ( self , _a , _a , _a=None , _a = "test" ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.get_test_dataloader(_a )
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE__ : Dict = self.compute_metrics
SCREAMING_SNAKE_CASE__ : Optional[int] = None
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
SCREAMING_SNAKE_CASE__ : Dict = time.time()
try:
SCREAMING_SNAKE_CASE__ : Optional[int] = eval_loop(
_a , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_a , metric_key_prefix=_a , )
finally:
SCREAMING_SNAKE_CASE__ : Dict = compute_metrics
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_a , _a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
SCREAMING_SNAKE_CASE__ : Tuple = self.post_process_function(_a , _a , output.predictions , """predict""" )
SCREAMING_SNAKE_CASE__ : str = self.compute_metrics(_a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
SCREAMING_SNAKE_CASE__ : str = metrics.pop(_a )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_a )
| 357
|
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
a :Optional[int] = True
from torch.cuda.amp import autocast
a :str = logging.getLogger(__name__)
@dataclass
class __a :
'''simple docstring'''
_SCREAMING_SNAKE_CASE :str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""})
_SCREAMING_SNAKE_CASE :Optional[str] = field(
default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
_SCREAMING_SNAKE_CASE :Optional[bool] = field(
default=UpperCamelCase_ , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""})
_SCREAMING_SNAKE_CASE :Optional[bool] = field(
default=UpperCamelCase_ , metadata={"""help""": """Whether to log verbose messages or not."""} , )
_SCREAMING_SNAKE_CASE :Optional[float] = field(
default=2.0 , metadata={"""help""": """Maximum temperature for gumbel softmax."""})
_SCREAMING_SNAKE_CASE :Optional[float] = field(
default=0.5 , metadata={"""help""": """Minimum temperature for gumbel softmax."""})
_SCREAMING_SNAKE_CASE :Optional[float] = field(
default=0.99_99_95 , metadata={"""help""": """Decay of gumbel temperature during training."""})
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.WARNING
if model_args.verbose_logging:
SCREAMING_SNAKE_CASE__ : int = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
SCREAMING_SNAKE_CASE__ : int = logging.INFO
logger.setLevel(__lowerCAmelCase )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to use for pretraining.
    """

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """
    Data collator that dynamically pads the received inputs and samples the masked time indices needed for
    self-supervised pretraining.
    """

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long)
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=attention_mask, min_masks=2,
        )
        return batch
class Wav2Vec2PreTrainer(Trainer):
    """
    Subclassed :class:`~transformers.Trainer` which decays the gumbel softmax temperature after each training step.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        return loss.detach()
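
# Note: with the defaults above, the gumbel softmax temperature follows
#   temperature = max(max_gumbel_temp * gumbel_temp_decay ** num_update_step, min_gumbel_temp),
# i.e. an exponential anneal from max_gumbel_temp down to a floor of
# min_gumbel_temp, updated once per training step.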
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}", cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = Wav2Vec2ForPreTraining(config)
    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)
    trainer = Wav2Vec2PreTrainer(
        model=model, data_collator=data_collator, args=training_args, train_dataset=vectorized_datasets["train"], eval_dataset=vectorized_datasets["validation"], tokenizer=feature_extractor, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
main()
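
# A minimal sketch of how this script might be invoked; the dataset and model
# identifiers below are illustrative assumptions, not values taken from this file:
#
#   python run_wav2vec2_pretraining.py \
#       --model_name_or_path patrickvonplaten/wav2vec2-base-v2 \
#       --dataset_name librispeech_asr \
#       --dataset_config_name clean \
#       --output_dir ./wav2vec2-pretrained \
#       --per_device_train_batch_size 8 \
#       --do_train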
| 56
| 0
|
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion."""

    _optional_components = ["vqvae"]

    def __init__(self, vqvae: AutoencoderKL, unet: UNet2DConditionModel, mel: Mel, scheduler: Union[DDIMScheduler, DDPMScheduler]):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Returns the default number of inference steps (50 for DDIM, 1000 for DDPM)."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1_000
    @torch.no_grad()
    def __call__(self, batch_size: int = 1, audio_file: str = None, raw_audio: np.ndarray = None, slice: int = 0, start_step: int = 0, steps: int = None, generator: torch.Generator = None, mask_start_secs: float = 0, mask_end_secs: float = 0, step_generator: torch.Generator = None, eta: float = 0, noise: torch.Tensor = None, encoding: torch.Tensor = None, return_dict=True) -> Union[Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], BaseOutput]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ), generator=generator, device=self.device,
            )
        images = noise
        mask = None
        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width))
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)
            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator)[0]
                input_images = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])
            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]
            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator,
                )["prev_sample"]
            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]
        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images))
        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverses the denoising process: recovers the noise tensor that generates an image."""
        # Only works with DDIM as this method is deterministic
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images])
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t**0.5 + beta_prod_t**0.5 * model_output
        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical Linear intERPolation between two noise tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
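
# A minimal usage sketch for ``slerp`` (shapes are illustrative assumptions):
# interpolating between two noise tensors gives a smooth transition between the
# corresponding generated audio samples, since the interpolation stays on the
# hypersphere that Gaussian noise concentrates on.
#
#   x0 = torch.randn(1, 1, 256, 256)
#   x1 = torch.randn(1, 1, 256, 256)
#   halfway = AudioDiffusionPipeline.slerp(x0, x1, 0.5)  # interpolated noise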
| 130
|
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits
    assert torch.allclose(out1, out2), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1_000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
"levit-128S": 128,
"levit-128": 128,
"levit-192": 192,
"levit-256": 256,
"levit-384": 384,
}
    names_to_config = {
"levit-128S": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"levit-128": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"levit-192": ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"levit-256": ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"levit-384": ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
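
# Example invocation (the script file name and paths are illustrative assumptions):
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ --no-push_to_hub
# Omitting --model_name converts and saves all five LeViT variants in turn.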
| 130
| 1
|
import argparse
import hashlib
import io
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)

WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict

def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
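
# ``make_linear_from_emb`` ties the decoder output projection to the token
# embedding: the returned ``nn.Linear`` shares the embedding's weight data, so
# logits are computed against the same matrix used to embed tokens and no
# separate projection weights need to be stored.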
def _download(url: str, root: str) -> bytes:
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.")
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # named checkpoints resolve through _MODELS; the download root "." is an assumed default here
        model_bytes = _download(_MODELS[checkpoint_path], root=".")
        original_checkpoint = torch.load(io.BytesIO(model_bytes), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"], encoder_ffn_dim=ffn_dim, decoder_ffn_dim=ffn_dim, num_mel_bins=dimensions["n_mels"], d_model=dimensions["n_audio_state"], max_target_positions=dimensions["n_text_ctx"], encoder_layers=dimensions["n_audio_layer"], encoder_attention_heads=dimensions["n_audio_head"], decoder_layers=dimensions["n_text_layer"], decoder_attention_heads=dimensions["n_text_head"], max_source_positions=dimensions["n_audio_ctx"],
    )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''')
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
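
# Example invocation (the checkpoint name is resolved through ``_MODELS``
# above; the output path is an illustrative assumption):
#   python convert_openai_to_hf.py --checkpoint_path tiny.en \
#       --pytorch_dump_folder_path ./whisper-tiny.en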
| 118
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class Wav2Vec2Processor(ProcessorMixin):
    r"""
    Constructs a Wav2Vec2 processor which wraps a Wav2Vec2 feature extractor and a Wav2Vec2 CTC tokenizer into a
    single processor.
    """

    feature_extractor_class = 'Wav2Vec2FeatureExtractor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                ' include a `tokenizer_class` attribute is deprecated and will be '
                'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
                ' attribute to either your `config.json` or `tokenizer_config.json` '
                'file to suppress this warning: ',
                FutureWarning,
            )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)
    def __call__(self, *args, **kwargs):
        """
        Forwards the `audio` argument to the feature extractor and the `text` argument to the tokenizer.
        """
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def pad(self, *args, **kwargs):
        """
        Forwards the `input_features` argument to the feature extractor's ``pad`` and the `labels` argument to the
        tokenizer's ``pad``.
        """
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop('input_features', None)
        labels = kwargs.pop('labels', None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['labels'] = labels['input_ids']
            return input_features
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Temporarily sets the tokenizer for processing the input. Useful for encoding labels."""
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.'
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
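
# A minimal usage sketch (the checkpoint name and array shape are illustrative
# assumptions, not values taken from this file):
#
#   import numpy as np
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   speech = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
#   inputs = processor(audio=speech, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="HELLO WORLD").input_ids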
| 118
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, act_dim=6, state_dim=17, hidden_size=23, max_length=11, is_training=True):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1_000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size, seq_length=self.seq_length, act_dim=self.act_dim, state_dim=self.state_dim, hidden_size=self.hidden_size, max_length=self.max_length)

    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            'states': states,
            'actions': actions,
            'rewards': rewards,
            'returns_to_go': returns_to_go,
            'timesteps': timesteps,
            'attention_mask': attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use input_ids
    test_generate_without_input_ids = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                'states',
                'actions',
                'rewards',
                'returns_to_go',
                'timesteps',
                'attention_mask',
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """
        An integration test that performs autoregressive prediction of state, action and return from a sequence of
        state, action and return.
        """
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert')
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device)
        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)
        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)
            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states, actions=actions, rewards=rewards, returns_to_go=returns_to_go, timesteps=timesteps, attention_mask=attention_mask, return_dict=False,
                )
            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )
            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1)
| 45
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = 'yolos'

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
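
# A minimal usage sketch (the overridden values are illustrative assumptions):
#
#   config = YolosConfig(num_detection_tokens=100, image_size=[512, 864])
#   onnx_config = YolosOnnxConfig(config)
#   print(onnx_config.inputs)  # OrderedDict([('pixel_values', {0: 'batch', ...})])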
| 337
| 0
|
'''
Find the greatest product of four numbers adjacent in the same direction
(left-right, up-down, or diagonally) in the 20x20 grid read from grid.txt.
'''
import os
def solution() -> int:
    '''Returns the greatest product of four adjacent numbers in the grid.'''
    with open(os.path.dirname(__file__) + '''/grid.txt''') as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])
    maximum = 0
    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
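
# Why the loop bounds are 17: a run of four cells starting at index j needs
# j + 3 <= 19, so the starting index ranges over 0..16 in the direction of
# travel; the anti-diagonal scan instead starts at j >= 3 so that j - 3 >= 0.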
| 219
|
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    '''
    Checks whether digit ``n`` can be placed at (row, column) without
    duplicating it in the row, the column, or the containing 3x3 box.
    '''
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    '''Returns the coordinates of the first empty (zero) cell, if any.'''
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    '''
    Takes a partially filled-in grid and attempts to assign values to all
    unassigned locations so as to meet the Sudoku constraints.
    '''
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    '''Prints the grid row by row.'''
    for row in grid:
        for cell in row:
            print(cell, end=''' ''')
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
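
# The solver above is a classic backtracking search: ``find_empty_location``
# picks the next blank cell, each digit 1-9 that passes ``is_safe`` is tried
# in turn, and a failed placement is undone (reset to 0) before the next
# candidate, so the grid is always restored on backtrack.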
| 219
| 1
|
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location='''cpu''')
    downstream_dict = checkpoint["Downstream"]
    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)
    arch = hf_config.architectures[0]
    if arch.endswith('''ForSequenceClassification'''):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('''ForAudioFrameClassification'''):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('''ForXVector'''):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
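
# Example invocation (the script file name, model and paths are illustrative
# assumptions):
#   python convert_wav2vec2_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model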
| 103
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
SPIECE_UNDERLINE = '▁'
class BarthezTokenizer(PreTrainedTokenizer):
    '''
    Adapted from CamembertTokenizer and BartTokenizer. Constructs a BARThez tokenizer, backed by SentencePiece.
    '''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''
        Builds model inputs from a sequence or a pair of sequences by adding special tokens:
        single sequence: ``<s> X </s>``, pair of sequences: ``<s> A </s></s> B </s>``.
        '''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        '''Retrieves a mask with 1 for special tokens and 0 for sequence tokens.'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''Creates a mask of zeros: BARThez, like BART, does not make use of token type ids.'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        '''Converts a token (str) to an id using the vocab.'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        '''Converts an index (integer) to a token (str) using the vocab.'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        '''Converts a sequence of tokens (strings) to a single string.'''
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
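
# A minimal usage sketch (the checkpoint name is one of the entries in
# PRETRAINED_VOCAB_FILES_MAP above; the sentence is an arbitrary example):
#
#   tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#   ids = tokenizer("Bonjour le monde").input_ids  # <s> ... </s> framing
#   print(tokenizer.convert_ids_to_tokens(ids))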
| 234
| 0
|
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("""transformers.models.speecht5""")
MAPPING_SPEECH_ENCODER_PRENET = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
MAPPING_TEXT_ENCODER_PRENET = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
MAPPING_SPEECH_DECODER_PRENET = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
MAPPING_TEXT_DECODER_PRENET = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
MAPPING_TEXT_DECODER_POSTNET = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
MAPPING_ENCODER = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
MAPPING_DECODER = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
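# Illustrative behaviour of the two wildcard forms handled above (a sketch, not
# part of the original script; the patterns are taken from the ignore lists):
#
#   should_ignore("encoder.proj.weight", ["encoder.proj"])                        # True: plain substring
#   should_ignore("text_encoder_prenet.embed.weight", ["text_encoder_prenet.*"])  # True: trailing ".*" prefix match
#   should_ignore("decoder.layers.3.norm_k.weight",
#                 ["decoder.layers.*.norm_k.weight"])                             # True: ".*." prefix+suffix match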
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key

                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_speecht5_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
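# Example invocation (a sketch with hypothetical local paths, mirroring the
# arguments defined above):
#
#   python convert_speecht5_checkpoint.py --task t2s \
#       --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_hf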
| 102
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
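# Example launch (a sketch; the script filename is hypothetical), accumulating
# gradients over 4 minibatches with fp16 mixed precision:
#
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4 --mixed_precision fp16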
| 102
| 1
|
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")
if __name__ == "__main__":
    main()
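# This script is meant to run unattended on a schedule (e.g. a daily CI cron job)
# with a token that has write access to the repository; example (hypothetical):
#
#   GITHUB_TOKEN=<personal-access-token> python stale.py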
| 219
|
"""Convert Hubert checkpoint."""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
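# Example invocations (a sketch with hypothetical paths). For a fine-tuned CTC
# checkpoint the fairseq dictionary is required; for a pretrained-only checkpoint
# pass --not_finetuned:
#
#   python convert_hubert.py --checkpoint_path ./hubert_ft.pt --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./hubert_hf
#   python convert_hubert.py --checkpoint_path ./hubert_base.pt --not_finetuned \
#       --pytorch_dump_folder_path ./hubert_hf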
| 58
| 0
|
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """
    Formula for amortization amount per month:
    A = p * r * (1 + r)^n / ((1 + r)^n - 1)
    where p is the principal, r is the rate per month and n is the number of payments
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
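# Worked example (approximate): borrowing 25_000 at 12% per annum over 3 years
# gives a monthly rate of 0.01 over 36 payments, i.e. an installment of roughly
# 25_000 * 0.01 * 1.01**36 / (1.01**36 - 1):
#
#   equated_monthly_installments(25_000, 0.12, 3)  # ~ 830.36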
| 362
|
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """
    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """
    Req = R1 + R2 + ... + Rn
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
    import doctest

    doctest.testmod()
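# Quick sanity checks for the two helpers above:
#
#   resistor_series([5, 10, 15])   # -> 30
#   resistor_parallel([10, 10])    # -> 5.0, since 1 / (1/10 + 1/10) = 5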
| 82
| 0
|
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """
        The coefficients should be in order of degree, from smallest to largest.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += self.coefficients[i] * polynomial_2.coefficients[j]
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: float) -> float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
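# A short usage sketch for the class above (coefficients are in ascending order):
#
#   p = Polynomial(1, [1, 2])   # represents 1 + 2x
#   p.evaluate(3)               # -> 7
#   str(p.derivative())         # -> "2"
#   str(p.integral())           # -> "1.0x^2 + 1.0x"  (integration constant defaults to 0)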
| 253
|
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
    from .utils import rich
| 253
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
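# Minimal sketch of how the ONNX config reports its dynamic axes (an
# illustration, not part of the original module):
#
#   config = BertConfig()  # defaults correspond to bert-base
#   onnx_config = BertOnnxConfig(config, task="multiple-choice")
#   onnx_config.inputs["input_ids"]  # -> {0: "batch", 1: "choice", 2: "sequence"}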
| 277
|
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 1
|
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
"n_samples": 64,
"horizon": 32,
"num_inference_steps": 20,
"n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network
"scale_grad_by_std": True,
"scale": 0.1,
"eta": 0.0,
"t_grad_cutoff": 2,
"device": "cpu",
}
if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
| 209
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/m2m100_418M": 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = ["""input_ids""", """attention_mask"""]
lowerCAmelCase_ = []
lowerCAmelCase_ = []
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="<s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="m2m100" , __lowerCAmelCase = None , __lowerCAmelCase=8 , **__lowerCAmelCase , ):
'''simple docstring'''
lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCamelCase__ = language_codes
lowerCamelCase__ = FAIRSEQ_LANGUAGE_CODES[language_codes]
lowerCamelCase__ = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code}
lowerCamelCase__ = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(__lowerCAmelCase )
for lang_code in fairseq_language_code
if self.get_lang_token(__lowerCAmelCase ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , language_codes=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__lowerCAmelCase , **__lowerCAmelCase , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = load_json(__lowerCAmelCase )
lowerCamelCase__ = {v: k for k, v in self.encoder.items()}
lowerCamelCase__ = spm_file
lowerCamelCase__ = load_spm(__lowerCAmelCase , self.sp_model_kwargs )
lowerCamelCase__ = len(self.encoder )
lowerCamelCase__ = {
self.get_lang_token(__lowerCAmelCase ): self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase )
}
lowerCamelCase__ = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase )}
lowerCamelCase__ = {v: k for k, v in self.lang_token_to_id.items()}
lowerCamelCase__ = src_lang if src_lang is not None else '''en'''
lowerCamelCase__ = tgt_lang
lowerCamelCase__ = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
lowerCamelCase__ = num_madeup_words
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(__lowerCAmelCase , self.encoder[self.unk_token] )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(__lowerCAmelCase , self.unk_token )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = []
lowerCamelCase__ = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCAmelCase ) + token
lowerCamelCase__ = []
else:
current_sub_tokens.append(__lowerCAmelCase )
out_string += self.sp_model.decode(__lowerCAmelCase )
return out_string.strip()
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix=None):
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
    def prepare_seq2seq_batch(self, src_texts, src_lang: str = "en", tgt_texts=None, tgt_lang: str = "ro", **kwargs):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
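# Illustrative sketch of the language-token bookkeeping the tokenizer above
# relies on, run against a toy vocabulary. All names and values here are
# hypothetical and not part of the tokenizer itself.
toy_encoder = {"<unk>": 0, "hello": 1, "world": 2}
toy_language_codes = ["en", "fr", "ro"]
toy_encoder_size = len(toy_encoder)
toy_lang_token_to_id = {f"__{code}__": toy_encoder_size + i for i, code in enumerate(toy_language_codes)}
toy_id_to_lang_token = {v: k for k, v in toy_lang_token_to_id.items()}
assert toy_lang_token_to_id["__ro__"] == 5
assert toy_id_to_lang_token[3] == "__en__"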
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = (
    "Total COVID-19 cases in the world: {}\n"
    "Total deaths due to COVID-19 in the world: {}\n"
    "Total COVID-19 patients recovered in the world: {}"
)
print(fmt.format(*covid_stats()))
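# Hedged note: the XPath above is tied to worldometers' current page markup
# and may break if the site layout changes. The namedtuple itself is plain
# Python; the values below are made up for illustration:
demo = covid_data(cases="1,000", deaths="10", recovered="900")
assert demo.recovered == "900"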
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
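# Worked example of the masked sequence-length arithmetic used by the tester
# above, plus the seeding trick the tests rely on (illustrative only; reuses
# the file's `math` and `numpy` imports):
# image_size=30, patch_size=2 -> 225 patches; with mask_ratio=0.6 the encoder
# keeps ceil(0.4 * (225 + 1)) = 91 tokens (the +1 is the [CLS] token).
assert int(math.ceil((1 - 0.6) * ((30 // 2) ** 2 + 1))) == 91

# Seeding NumPy identically yields identical noise, which is why two forward
# passes can be compared even though ViTMAE samples a random mask each call.
np.random.seed(2)
first = np.random.uniform(size=(1, 4))
np.random.seed(2)
second = np.random.uniform(size=(1, 4))
assert (first == second).all()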
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
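# Usage sketch for the torch.hub-style entry points above; the repo path and
# model name are illustrative assumptions, not tested invocations:
# import torch
# tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
# mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")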
"""simple docstring"""
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(canvas_size: int) -> list[list[bool]]:
    canvas = [[False for i in range(canvas_size)] for j in range(canvas_size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Runs one generation of Conway's Game of Life on the given canvas."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state
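# Worked example of the rules in __judge_point (illustrative): a live centre
# cell with exactly two live neighbours survives to the next generation.
_neighbourhood = [
    [True, False, False],
    [False, True, True],
    [False, False, False],
]
assert __judge_point(True, _neighbourhood) is True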
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Return pi to `precision` significant digits via the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
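# Sanity-check sketch: each Chudnovsky series term contributes roughly 14
# correct digits, so precision 50 needs ceil(50 / 14) = 4 terms.
assert pi(20).startswith("3.1415926535")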
if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
import math
def jump_search(arr: list, x: int) -> int:
    """Return the index of x in the sorted list arr, or -1 if absent."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
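# Illustrative check: jump search probes blocks of about sqrt(n) elements,
# then scans linearly inside the block that may contain x.
assert jump_search([0, 1, 2, 3, 5, 8, 13, 21], 8) == 5
assert jump_search([0, 1, 2, 3, 5, 8, 13, 21], 4) == -1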
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass
class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
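# Hypothetical subclass sketch showing how a concrete reader would plug into
# the abstract base above (the class name and body are illustrative only):
# class JsonDatasetReader(AbstractDatasetReader):
#     def read(self):
#         # build and return a Dataset from self.path_or_paths, honouring
#         # self.features, self.cache_dir, self.streaming, etc.
#         ...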
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
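# Example invocation of the converter above (the script name and all paths
# are placeholders):
# python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./lxmert.ckpt \
#     --config_file ./lxmert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin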
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr, low, high):
    """Divide-and-conquer maximum subarray; returns (low, high, sum)."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr, low, mid, high):
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)
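# Worked example for the divide-and-conquer above: on the classic CLRS array
# the maximum subarray spans indices 7..10 with sum 18 + 20 - 7 + 12 = 43.
_clrs = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]
assert max_subarray(_clrs, 0, len(_clrs) - 1) == (7, 10, 43)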
def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101_122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3_018, 70_307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: Optional[ImageInput] = None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_image_mask: Optional[bool] = None, return_codebook_pixels: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs, )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
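# Usage sketch for the processor above (the checkpoint name is an assumption;
# substitute whatever FLAVA weights you actually use):
# from PIL import Image
# processor = FlavaProcessor.from_pretrained("facebook/flava-full")
# inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")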
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between two prompts, plus its start/end indices and all lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generate an up-to-date model table from the content of the auto modules."""
    # Dictionary of model names to configs.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"

    return table
def check_model_table(overwrite=False):
    """Check the model table in index.md is consistent with the state of the lib and maybe `overwrite`."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: str = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
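# Worked example of the feature-count property above: with the defaults (a
# [0]-cardinality placeholder embedding, no extra real/time features, and
# input_size=1) the count is 0 + 0 + 0 + 0 + 1 * 2 = 2.
if __name__ == "__main__":
    cfg = TimeSeriesTransformerConfig(prediction_length=24)
    assert cfg._number_of_features == 2
    assert cfg.feature_size == 1 * 7 + 2  # seven default lags plus the two scale features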
"""simple docstring"""
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """Classic fourth-order Runge-Kutta integration of y' = f(x, y) from x0 to x_end with step h."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
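    # Usage sketch (for the restored signature above): solve y' = y on [0, 1]
    # with y(0) = 1; the exact solution is e**x, so the last value should be ~e.
    y = runge_kutta(lambda x, y: y, y0=1.0, x0=0.0, h=0.01, x_end=1.0)
    print(y[-1], np.e)  # ~2.71828 vs 2.71828...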
| 102
| 1
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    '''simple docstring'''

    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.asm = False
        self.causal = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = 'last'
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id, )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertModel(config=config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'langs': token_type_ids,
            'lengths': input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
    all_generative_model_classes = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''')
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]], dtype=tf.int32, )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ], dtype=tf.float32, )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
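# To run only the slow integration check above (sketch; the test-file path
# follows the usual transformers layout and is assumed here):
#   RUN_SLOW=1 pytest tests/models/flaubert/test_modeling_tf_flaubert.py -k output_embeds_base_model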
| 359
|
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , CASES )
def test_import_parsing(tmp_path , case ):
    '''simple docstring'''
    tmp_file_path = os.path.join(tmp_path , '''test_file.py''' )
    with open(tmp_file_path , '''w''' ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
    assert parsed_imports == ["os"]
| 105
| 0
|
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
__A = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
__A = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
__A = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' )
    if not keep_singletons:
        logger.info(
            f'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
            "files, respectively" )
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f'''{name}/recall''': recall, f'''{name}/precision''': precision, f'''{name}/f1''': f1})
        logger.info(
            name.ljust(10), f'''Recall: {recall * 100:.2f}''', f''' Precision: {precision * 100:.2f}''', f''' F1: {f1 * 100:.2f}''', )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f'''CoNLL score: {conll:.2f}''')
        output_scores.update({"conll_score": conll})
    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }), codebase_urls=["https://github.com/ns-moosavi/coval"], reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ], )

    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        """simple docstring"""
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references, sys_lines=predictions, metrics=metrics, NP_only=NP_only, remove_nested=remove_nested, keep_singletons=keep_singletons, min_span=min_span, )
        return score
| 148
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
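# How the lazy pattern above behaves (sketch, usage illustrative):
#   import transformers.models.xlnet as xlnet  # cheap: nothing heavy imported yet
#   xlnet.XLNetConfig                          # resolved via _import_structure
#   xlnet.XLNetModel                           # first access imports modeling_xlnet (needs torch)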
| 148
| 1
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list) -> float:
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method='nm')
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    regressor = SVR(kernel='rbf', C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=['total_user', 'total_even', 'days']
    )
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()
    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]
    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]
    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]
    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]
    # check the safety of today's data
    not_str = '' if data_safety_checker(res_vote, tst_user[0]) else 'not '
    print(f"Today's data is {not_str}safe.")
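    # Standalone sketch of the outlier guard defined above:
    # interquartile_range_checker returns q1 - 0.1 * IQR as a lower safety limit.
    print(interquartile_range_checker([1, 2, 4, 8, 16]))  # 2 - 0.1 * 6 = 1.4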
| 358
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot_small-90M': 512}
def get_pairs(word):
    """Return the set of symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
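# Example (sketch): for the symbol tuple ("l", "o", "w</w>"),
# get_pairs returns {("l", "o"), ("o", "w</w>")}.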
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    '''Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding).'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__", pad_token="__null__", **kwargs):
        """simple docstring"""
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """Apply byte-pair merges to a single whitespace token."""
        if token in self.cache:
            return self.cache[token]
        token = re.sub('([.,!?()])', r' \1', token)
        token = re.sub("(')", r' \1 ', token)
        token = re.sub(r'\s{2,}', ' ', token)
        if "\n" in token:
            token = token.replace('\n', ' __newln__')
        tokens = token.split(' ')
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue
            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = '@@ '.join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return ' '.join(words)

    def _tokenize(self, text: str) -> List[str]:
        """simple docstring"""
        split_tokens = []
        words = re.findall(r'\S+\n?', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """simple docstring"""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """simple docstring"""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """simple docstring"""
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
| 301
| 0
|
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        """simple docstring"""
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def _lowercase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
    def test_config(self):
        """simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        """simple docstring"""
        return
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip("""Swin does not use inputs_embeds""" )
def _lowercase ( self : Any ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def _lowercase ( self : str ) -> List[Any]:
"""simple docstring"""
pass
def _lowercase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def _lowercase ( self : Tuple ) -> Dict:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
__magic_name__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ = [*signature.parameters.keys()]
__magic_name__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def _lowercase ( self : Tuple ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def _lowercase ( self : List[str] ) -> Dict:
"""simple docstring"""
pass
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ) -> Any:
"""simple docstring"""
__magic_name__ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
__magic_name__ = outputs.hidden_states
__magic_name__ = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# Swin has a different seq_length
__magic_name__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _lowercase ( self : Dict ) -> Dict:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__magic_name__ = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = 3
__magic_name__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__magic_name__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__magic_name__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__magic_name__ = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def _lowercase ( self : Optional[int] ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _lowercase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _lowercase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
pass
def _lowercase ( self : Dict ) -> Any:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t):
            t[t != t] = 0  # NaN != NaN, so this zeroes exactly the NaN entries
            return t
def check_equivalence(UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int={} ):
with torch.no_grad():
__magic_name__ = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ )
__magic_name__ = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ ).to_tuple()
def recursive_check(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] ):
if isinstance(UpperCamelCase__ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(UpperCamelCase__ , UpperCamelCase__ ):
recursive_check(UpperCamelCase__ , UpperCamelCase__ )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(UpperCamelCase__ , UpperCamelCase__ )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(UpperCamelCase__ ) , set_nan_tensor_to_zero(UpperCamelCase__ ) , atol=1E-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
F''' {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}. Dict has'''
F''' `nan`: {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}.'''
) , )
recursive_check(UpperCamelCase__ , UpperCamelCase__ )
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} )
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    '''simple docstring'''

    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        """simple docstring"""
        self.model_tester = MaskFormerSwinModelTester(self)
    def test_backbone_outputs(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["""pixel_values"""].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 88
|
from __future__ import annotations
from collections.abc import Iterator
class Node:
    '''simple docstring'''

    def __init__(self, value: int) -> None:
        self.value = value
        self.left = None
        self.right = None


class BinaryTreeNodeSum:
    '''simple docstring'''

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
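    # Quick sketch: the iterator yields the sum of all node values once.
    root = Node(10)
    root.left = Node(5)
    root.right = Node(-3)
    print(next(iter(BinaryTreeNodeSum(root))))  # 12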
| 88
| 1
|
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree) -> List[Tuple[int, ...]]:
    '''Collect the shapes of every tensor in a (possibly nested) dict/list/tuple tree.'''
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError('''Not supported''')
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """Convert a flat index into a multi-dimensional index over `dims`."""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
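# Worked example (illustrative): with dims=(3, 4), flat index 7 maps to the
# multi-index (1, 3), since 7 == 1 * 4 + 3.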
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    """Return the smallest set of slices covering the inclusive index range [start, end]."""

    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
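# Worked example (illustrative): covering the inclusive flat range [2, 8] of a
# (2, 5) grid, i.e. start=[0, 2], end=[1, 3], dims=[2, 5], yields
#     [(slice(0, 1), slice(2, 5)), (slice(1, 2), slice(0, 4))]
# that is, the ragged tail of row 0 plus the ragged head of row 1, with no
# element-by-element indexing.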
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """Slice the flat range [flat_start, flat_end) out of the leading batch dims of `t`."""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive, so subtract one from the end index
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """Run `layer` on `inputs` in chunks over the first `no_batch_dims` batch dimensions."""
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
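# Minimal usage sketch (illustrative, not part of the original module; assumes
# the chunk_layer signature restored above):
#
#     layer = lambda x: {"out": x * 2}
#     inputs = {"x": torch.ones(2, 4, 3)}
#     out = chunk_layer(layer, inputs, chunk_size=4, no_batch_dims=2)
#     # The leading batch dims (2, 4) are flattened to 8, processed in two
#     # chunks of 4, and reshaped back: out["out"].shape == (2, 4, 3).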
class ChunkSizeTuner:
    """Caches a chunk size that was empirically found to run without errors."""

    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1, ac2) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Nothing is cached yet, so force a first-time tuning pass
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
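# Illustrative tuning run: with max_chunk_size=512 and min_chunk_size=64, the
# candidates tested are [64, 128, 256, 516] (powers of two above the minimum,
# with the largest bumped by 4 as a headroom probe); the binary search keeps
# the largest candidate whose trial call does not raise a RuntimeError
# (typically a CUDA out-of-memory error).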
| 359
|
'''simple docstring'''
from __future__ import annotations
sieve = [True] * 1000001
i = 2
while i * i <= 1000000:
    if sieve[i]:
        for j in range(i * i, 1000001, i):
            sieve[j] = False
    i += 1


def is_prime(n: int) -> bool:
    """Return True if n is prime, looked up in the precomputed sieve."""
    return sieve[n]


def contains_an_even_digit(n: int) -> bool:
    """Return True if any digit of n is even."""
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1000000) -> list[int]:
    """Return every circular prime below `limit`."""
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    return len(find_circular_primes())
if __name__ == "__main__":
    print(f"""{len(find_circular_primes()) = }""")
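# Sanity note (known values, stated for illustration): the circular primes
# below 100 are 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79 and 97, and the
# Project Euler 35 count computed by solution() over the default limit of
# one million is 55.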
| 219
| 0
|
'''simple docstring'''
def join(separator: str, separated: list) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
    from doctest import testmod

    testmod()
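# Usage sketch (illustrative, assuming the join() signature restored above):
#     join("-", ["a", "b", "c"])            # -> "a-b-c"
#     join(" ", ["You", "are", "amazing"])  # -> "You are amazing"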
| 163
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A =logging.get_logger(__name__)
__A ={
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
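# Usage sketch (illustrative, assuming the PegasusConfig names restored above):
#     config = PegasusConfig(d_model=512, encoder_layers=6, decoder_layers=6)
#     assert config.hidden_size == 512  # resolved through attribute_map -> d_model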
| 163
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
UpperCamelCase__ = None
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase__ = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
UpperCamelCase__ = {
"""facebook/mbart-large-en-ro""": 1_024,
"""facebook/mbart-large-cc25""": 1_024,
}
# fmt: off
UpperCamelCase__ = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
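# Note (illustrative): mBART formats a source sequence as "<tokens> </s> <lang_code>",
# which is why set_src_lang_special_tokens above sets prefix_tokens = [] and
# suffix_tokens = [eos_token_id, cur_lang_code].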
| 356
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)
        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        images = np.ones([3, 224, 224])
        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        images = np.ones([3, 224, 224])
        inputs = processor(audio=audio, images=images)
        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
| 87
| 0
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
A : str = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)
    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subs = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subs)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq
    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
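    # Worked example (illustrative): construct_xpath(["html", "body", "div"], [0, 0, 1])
    # returns "/html/body/div[1]"; a subscript of 0 means the tag is the only
    # child with that name, so no "[n]" suffix is emitted.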
    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )
        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)
        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
| 6
|
"""simple docstring"""
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm: gcd(a, b) == gcd(b, a % b)."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
'''simple docstring'''
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
    main()
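# Worked example (illustrating the recurrence gcd(a, b) == gcd(b, a % b)):
#     euclidean_gcd(48, 18): (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) -> 6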
| 108
| 0
|
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = AutoencoderKL
UpperCAmelCase__ : Tuple = "sample"
UpperCAmelCase__ : Optional[int] = 1e-2
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}
@property
def _a ( self ) -> Any:
return (3, 32, 32)
@property
def _a ( self ) -> Union[str, Any]:
return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
def _a ( self ) -> List[Any]:
pass
def _a ( self ) -> Union[str, Any]:
pass
@unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS' )
def _a ( self ) -> str:
# enable deterministic behavior for gradient checkpointing
__UpperCamelCase , __UpperCamelCase =self.prepare_init_args_and_inputs_for_common()
__UpperCamelCase =self.model_class(**A_ )
model.to(A_ )
assert not model.is_gradient_checkpointing and model.training
__UpperCamelCase =model(**A_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
__UpperCamelCase =torch.randn_like(A_ )
__UpperCamelCase =(out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__UpperCamelCase =self.model_class(**A_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(A_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__UpperCamelCase =model_a(**A_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
__UpperCamelCase =(out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
__UpperCamelCase =dict(model.named_parameters() )
__UpperCamelCase =dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase , __UpperCamelCase =AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(A_ )
__UpperCamelCase =model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _a ( self ) -> str:
__UpperCamelCase =AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' )
__UpperCamelCase =model.to(A_ )
model.eval()
if torch_device == "mps":
__UpperCamelCase =torch.manual_seed(0 )
else:
__UpperCamelCase =torch.Generator(device=A_ ).manual_seed(0 )
__UpperCamelCase =torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__UpperCamelCase =image.to(A_ )
with torch.no_grad():
__UpperCamelCase =model(A_ , sample_posterior=A_ , generator=A_ ).sample
__UpperCamelCase =output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__UpperCamelCase =torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
__UpperCamelCase =torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
__UpperCamelCase =torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(A_ , A_ , rtol=1E-2 ) )
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
def _a ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fpaa=False):
        dtype = torch.float16 if fpaa else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fpaa=False):
        revision = "fp16" if fpaa else None
        torch_dtype = torch.float16 if fpaa else torch.float32
        model = AutoencoderKL.from_pretrained(model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision)
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def _a ( self , A_ , A_ , A_ ) -> List[str]:
__UpperCamelCase =self.get_sd_vae_model()
__UpperCamelCase =self.get_sd_image(A_ )
__UpperCamelCase =self.get_generator(A_ )
with torch.no_grad():
__UpperCamelCase =model(A_ , generator=A_ , sample_posterior=A_ ).sample
assert sample.shape == image.shape
__UpperCamelCase =sample[-1, -2:, -2:, :2].flatten().float().cpu()
__UpperCamelCase =torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(A_ , A_ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def _a ( self , A_ , A_ ) -> int:
__UpperCamelCase =self.get_sd_vae_model(fpaa=A_ )
__UpperCamelCase =self.get_sd_image(A_ , fpaa=A_ )
__UpperCamelCase =self.get_generator(A_ )
with torch.no_grad():
__UpperCamelCase =model(A_ , generator=A_ , sample_posterior=A_ ).sample
assert sample.shape == image.shape
__UpperCamelCase =sample[-1, -2:, :2, -2:].flatten().float().cpu()
__UpperCamelCase =torch.tensor(A_ )
assert torch_all_close(A_ , A_ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def _a ( self , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =self.get_sd_vae_model()
__UpperCamelCase =self.get_sd_image(A_ )
with torch.no_grad():
__UpperCamelCase =model(A_ ).sample
assert sample.shape == image.shape
__UpperCamelCase =sample[-1, -2:, -2:, :2].flatten().float().cpu()
__UpperCamelCase =torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(A_ , A_ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def _a ( self , A_ , A_ ) -> str:
__UpperCamelCase =self.get_sd_vae_model()
__UpperCamelCase =self.get_sd_image(A_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__UpperCamelCase =model.decode(A_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__UpperCamelCase =sample[-1, -2:, :2, -2:].flatten().cpu()
__UpperCamelCase =torch.tensor(A_ )
assert torch_all_close(A_ , A_ , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def _a ( self , A_ , A_ ) -> List[str]:
__UpperCamelCase =self.get_sd_vae_model(fpaa=A_ )
__UpperCamelCase =self.get_sd_image(A_ , shape=(3, 4, 64, 64) , fpaa=A_ )
with torch.no_grad():
__UpperCamelCase =model.decode(A_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__UpperCamelCase =sample[-1, -2:, :2, -2:].flatten().float().cpu()
__UpperCamelCase =torch.tensor(A_ )
assert torch_all_close(A_ , A_ , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def _a ( self , A_ ) -> List[Any]:
__UpperCamelCase =self.get_sd_vae_model(fpaa=A_ )
__UpperCamelCase =self.get_sd_image(A_ , shape=(3, 4, 64, 64) , fpaa=A_ )
with torch.no_grad():
__UpperCamelCase =model.decode(A_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__UpperCamelCase =model.decode(A_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(A_ , A_ , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def _a ( self , A_ ) -> List[str]:
__UpperCamelCase =self.get_sd_vae_model()
__UpperCamelCase =self.get_sd_image(A_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__UpperCamelCase =model.decode(A_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__UpperCamelCase =model.decode(A_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(A_ , A_ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def _a ( self , A_ , A_ ) -> Optional[int]:
__UpperCamelCase =self.get_sd_vae_model()
__UpperCamelCase =self.get_sd_image(A_ )
__UpperCamelCase =self.get_generator(A_ )
with torch.no_grad():
__UpperCamelCase =model.encode(A_ ).latent_dist
__UpperCamelCase =dist.sample(generator=A_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__UpperCamelCase =sample[0, -1, -3:, -3:].flatten().cpu()
__UpperCamelCase =torch.tensor(A_ )
__UpperCamelCase =3E-3 if torch_device != 'mps' else 1E-2
assert torch_all_close(A_ , A_ , atol=A_ )
| 117
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
@require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , A_ )
@require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 117
| 1
|
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 218
|
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1_000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1_000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
"num_train_timesteps": 40,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"num_train_timesteps": 151,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
def strabool(v):
    """Parse a boolean-ish CLI value (argparse helper)."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
"""simple docstring"""
__a =checkpoint[F'{old_prefix}.in_layers.0.weight']
__a =checkpoint[F'{old_prefix}.in_layers.0.bias']
__a =checkpoint[F'{old_prefix}.in_layers.2.weight']
__a =checkpoint[F'{old_prefix}.in_layers.2.bias']
__a =checkpoint[F'{old_prefix}.emb_layers.1.weight']
__a =checkpoint[F'{old_prefix}.emb_layers.1.bias']
__a =checkpoint[F'{old_prefix}.out_layers.0.weight']
__a =checkpoint[F'{old_prefix}.out_layers.0.bias']
__a =checkpoint[F'{old_prefix}.out_layers.3.weight']
__a =checkpoint[F'{old_prefix}.out_layers.3.bias']
if has_skip:
__a =checkpoint[F'{old_prefix}.skip_connection.weight']
__a =checkpoint[F'{old_prefix}.skip_connection.bias']
return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None):
"""simple docstring"""
__a , __a , __a =checkpoint[F'{old_prefix}.qkv.weight'].chunk(3 , dim=0 )
__a , __a , __a =checkpoint[F'{old_prefix}.qkv.bias'].chunk(3 , dim=0 )
__a =checkpoint[F'{old_prefix}.norm.weight']
__a =checkpoint[F'{old_prefix}.norm.bias']
__a =weight_q.squeeze(-1 ).squeeze(-1 )
__a =bias_q.squeeze(-1 ).squeeze(-1 )
__a =weight_k.squeeze(-1 ).squeeze(-1 )
__a =bias_k.squeeze(-1 ).squeeze(-1 )
__a =weight_v.squeeze(-1 ).squeeze(-1 )
__a =bias_v.squeeze(-1 ).squeeze(-1 )
__a =(
checkpoint[F'{old_prefix}.proj_out.weight'].squeeze(-1 ).squeeze(-1 )
)
__a =checkpoint[F'{old_prefix}.proj_out.bias'].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    """simple docstring"""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}
__a =checkpoint['time_embed.0.weight']
__a =checkpoint['time_embed.0.bias']
__a =checkpoint['time_embed.2.weight']
__a =checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
__a =checkpoint['label_emb.weight']
__a =checkpoint['input_blocks.0.0.weight']
__a =checkpoint['input_blocks.0.0.bias']
__a =unet_config['down_block_types']
__a =unet_config['layers_per_block']
__a =unet_config['attention_head_dim']
__a =unet_config['block_out_channels']
__a =1
__a =channels_list[0]
for i, layer_type in enumerate(_snake_case ):
__a =channels_list[i]
__a =current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(_snake_case ):
__a =F'down_blocks.{i}.resnets.{j}'
__a =F'input_blocks.{current_layer}.0'
__a =True if j == 0 and downsample_block_has_skip else False
__a =convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case , has_skip=_snake_case )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(_snake_case ):
__a =F'down_blocks.{i}.resnets.{j}'
__a =F'input_blocks.{current_layer}.0'
__a =True if j == 0 and downsample_block_has_skip else False
__a =convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case , has_skip=_snake_case )
__a =F'down_blocks.{i}.attentions.{j}'
__a =F'input_blocks.{current_layer}.1'
__a =convert_attention(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
current_layer += 1
if i != len(_snake_case ) - 1:
__a =F'down_blocks.{i}.downsamplers.0'
__a =F'input_blocks.{current_layer}.0'
__a =convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case )
current_layer += 1
__a =current_channels
# hardcoded the mid-block for now
__a ='mid_block.resnets.0'
__a ='middle_block.0'
__a =convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case )
__a ='mid_block.attentions.0'
__a ='middle_block.1'
__a =convert_attention(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
__a ='mid_block.resnets.1'
__a ='middle_block.2'
__a =convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case )
__a =0
__a =unet_config['up_block_types']
for i, layer_type in enumerate(_snake_case ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
__a =F'up_blocks.{i}.resnets.{j}'
__a =F'output_blocks.{current_layer}.0'
__a =convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case , has_skip=_snake_case )
current_layer += 1
if i != len(_snake_case ) - 1:
__a =F'up_blocks.{i}.upsamplers.0'
__a =F'output_blocks.{current_layer-1}.1'
__a =convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
__a =F'up_blocks.{i}.resnets.{j}'
__a =F'output_blocks.{current_layer}.0'
__a =convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case , has_skip=_snake_case )
__a =F'up_blocks.{i}.attentions.{j}'
__a =F'output_blocks.{current_layer}.1'
__a =convert_attention(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
current_layer += 1
if i != len(_snake_case ) - 1:
__a =F'up_blocks.{i}.upsamplers.0'
__a =F'output_blocks.{current_layer-1}.2'
__a =convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case )
__a =checkpoint['out.0.weight']
__a =checkpoint['out.0.bias']
__a =checkpoint['out.2.weight']
__a =checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
_lowerCAmelCase : Optional[Any] = parser.parse_args()
_lowerCAmelCase : Optional[Any] = strabool(args.class_cond)
_lowerCAmelCase : Dict = os.path.basename(args.unet_path)
print(f'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
_lowerCAmelCase : Tuple = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_lowerCAmelCase : Optional[int] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
_lowerCAmelCase : int = TEST_UNET_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Tuple = con_pt_to_diffuser(args.unet_path, unet_config)
_lowerCAmelCase : Optional[int] = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
_lowerCAmelCase : int = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
_lowerCAmelCase : Optional[int] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_lowerCAmelCase : List[str] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
_lowerCAmelCase : Any = CMStochasticIterativeScheduler(**scheduler_config)
_lowerCAmelCase : str = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 218
| 1
|
'''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(tree: TreeNode | None) -> bool:
    """Validate the tree's nodes, then check the BST ordering invariant recursively."""

    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(tree):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(tree, -float("inf"), float("inf"))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
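# Usage sketch (illustrative, assuming the TreeNode/is_binary_search_tree
# names restored above):
#
#     valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
#     invalid = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))
#     assert is_binary_search_tree(valid)
#     assert not is_binary_search_tree(invalid)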
| 67
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
return BlipTextConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,)
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = config_and_inputs
UpperCAmelCase_ : str = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
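# Typical invocation for just this test class (assuming the usual transformers
# repository layout; the file path is illustrative):
#     python -m pytest tests/models/blip/test_modeling_tf_blip_text.py -k TFBlipTextModelTest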
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, tokenizer_file=None, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, **kwargs):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}")
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.")
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, pad_token=pad_token, eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, **kwargs)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}")
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
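# Sketch of what the offset logic above produces with the defaults (offset=103
# and no user-supplied additional_special_tokens):
#
#     toks = ["<mask_1>"] + [f"<unk_{i}>" for i in range(2, 103)]
#     len(toks)  # -> 102
#
# Together with <pad>, </s> and <mask_2> these occupy the low ids of the
# Pegasus vocabulary, which is the invariant _special_token_mask checks.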
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
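# The _LazyModule indirection above defers the heavy torch/tokenizers imports
# until an attribute is actually accessed. A stripped-down sketch of the same
# idea (hypothetical names, not the transformers implementation):
#
#     import importlib
#     class _Lazy:
#         def __init__(self, mapping):
#             self._mapping = mapping  # attribute name -> submodule path
#         def __getattr__(self, name):
#             module = importlib.import_module(self._mapping[name])
#             return getattr(module, name)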
'''simple docstring'''
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = 'EGZWVONAHDCLFQMSIPJBYUKXTR'
rotor2 = 'FOBHMDKEXQNRAULPGSJVTYICZW'
rotor3 = 'ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector --------------------------
reflector = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
rotor4 = 'RMDJXFUWGISLHVTCQNKYPBEZOA'
rotor5 = 'SGLCPQWZHKXAREONTFBVIYJUDM'
rotor6 = 'HVSICLTYKQUBXDWAJZOMFGPREN'
rotor7 = 'RZWQHFMVDBKICJLNTUXAGYPSOE'
rotor8 = 'LFKIJODBEGAMQPXVUHYSTCZRWN'
rotor9 = 'KOAEGVDHXPQZMLFTYWJNBRCIUS'
def _validator(rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    '''simple docstring'''
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)
    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)
    # Validates string and returns dict
    pbdict = _plugboard(pb)
    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    '''simple docstring'''
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}
    pbstring = pbstring.replace(" ", "")  # str.replace returns a new string, so keep the result
    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl
    # Create the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
def enigma(text: str, rotor_position: RotorPositionT, rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3), plugb: str = "") -> str:
    '''simple docstring'''
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper())
    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1
    result = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]
            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]
            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]
            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]
            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0
        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #         'Invalid symbol('+repr(symbol)+')')
        result.append(symbol)
    return "".join(result)
if __name__ == "__main__":
    message = 'This is my Python script that emulates the Enigma machine from WWII.'
    rotor_pos = (1, 1, 1)
    pb = 'pictures'
    rotor_sel = (rotor2, rotor4, rotor8)  # any three distinct rotors work here
    en = enigma(message, rotor_pos, rotor_sel, pb)
print('Encrypted message:', en)
print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    '''simple docstring'''
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    '''simple docstring'''
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
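# Small sanity check for the pair of functions above (hand-run example):
#
#     data = [3, 1, 2]
#     comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
#     assert data == [1, 2, 3] and comparisons >= 2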
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
'No of Comparisons for 100 elements selected from a standard normal distribution'
'is :'
)
print(z)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_squeezebert": [
"SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SqueezeBertConfig",
"SqueezeBertOnnxConfig",
],
"tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
encode_dict = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    '''simple docstring'''
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded
def decode(coded: str) -> str:
    '''simple docstring'''
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
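# Round-trip property of the cipher above: each letter maps to a fixed
# five-symbol A/B group and spaces survive intact, so for alphabetic input
#
#     assert decode(encode("hello world")) == "hello world"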
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
_snake_case = logging.get_logger(__name__)
_snake_case = {
"Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1e-12 , _a=384 , _a=16 , _a=3 , _a=False , _a=True , _a=[2, 5, 8, 11] , _a="project" , _a=[4, 2, 1, 0.5] , _a=[96, 192, 384, 768] , _a=256 , _a=-1 , _a=False , _a=True , _a=0.4 , _a=255 , _a=0.1 , _a=[1, 1024, 24, 24] , _a=[0, 1] , _a=None , **_a , ) -> Dict:
super().__init__(**_a )
_A : List[str] = hidden_size
_A : Tuple = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("""Initializing the config with a `BiT` backbone.""" )
_A : List[Any] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
}
_A : str = BitConfig(**_a )
elif isinstance(_a , _a ):
logger.info("""Initializing the config with a `BiT` backbone.""" )
_A : Union[str, Any] = BitConfig(**_a )
elif isinstance(_a , _a ):
_A : Optional[Any] = backbone_config
else:
raise ValueError(
F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
_A : int = backbone_featmap_shape
_A : List[Any] = neck_ignore_stages
if readout_type != "project":
raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" )
else:
_A : Tuple = None
_A : Optional[int] = None
_A : Any = []
_A : Union[str, Any] = num_hidden_layers
_A : Optional[int] = num_attention_heads
_A : Optional[int] = intermediate_size
_A : str = hidden_act
_A : List[str] = hidden_dropout_prob
_A : Union[str, Any] = attention_probs_dropout_prob
_A : Any = initializer_range
_A : Union[str, Any] = layer_norm_eps
_A : Any = image_size
_A : List[Any] = patch_size
_A : Tuple = num_channels
_A : int = qkv_bias
_A : str = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" )
_A : str = readout_type
_A : int = reassemble_factors
_A : Optional[int] = neck_hidden_sizes
_A : Tuple = fusion_hidden_size
_A : Optional[int] = head_in_index
_A : int = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
_A : str = use_auxiliary_head
_A : Optional[int] = auxiliary_loss_weight
_A : Optional[int] = semantic_loss_ignore_index
_A : Dict = semantic_classifier_dropout
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
CONFIG_NAME = '''config.json'''
WEIGHTS_NAME = '''diffusion_pytorch_model.bin'''
FLAX_WEIGHTS_NAME = '''diffusion_flax_model.msgpack'''
ONNX_WEIGHTS_NAME = '''model.onnx'''
SAFETENSORS_WEIGHTS_NAME = '''diffusion_pytorch_model.safetensors'''
ONNX_EXTERNAL_WEIGHTS_NAME = '''weights.pb'''
HUGGINGFACE_CO_RESOLVE_ENDPOINT = '''https://huggingface.co'''
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = '''diffusers_modules'''
HF_MODULES_CACHE = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules'''))
DEPRECATED_REVISION_ARGS = ['''fp16''', '''non-ema''']
TEXT_ENCODER_ATTN_MODULE = '''.self_attn'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : int =logging.get_logger(__name__)
_lowercase : Optional[Any] ={"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class OpenAIGPTConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=40478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn="gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =["MobileNetV2FeatureExtractor"]
__SCREAMING_SNAKE_CASE =["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : List[Any] = tempfile.mkdtemp()
# fmt: off
lowercase_ : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowercase_ : int = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) )
lowercase_ : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
lowercase_ : Tuple = {'unk_token': '<unk>'}
lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
lowercase_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(__UpperCamelCase ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(__UpperCamelCase ) )
lowercase_ : Any = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073],
'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
lowercase_ : List[str] = os.path.join(self.tmpdirname ,__UpperCamelCase )
with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp:
json.dump(__UpperCamelCase ,__UpperCamelCase )
def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname ,**__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Dict = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
lowercase_ : List[str] = [Image.fromarray(np.moveaxis(__UpperCamelCase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Optional[int] = self.get_tokenizer()
lowercase_ : List[Any] = self.get_rust_tokenizer()
lowercase_ : Tuple = self.get_image_processor()
lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowercase_ : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=__UpperCamelCase )
lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowercase_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,__UpperCamelCase )
self.assertIsInstance(processor_fast.tokenizer ,__UpperCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,__UpperCamelCase )
self.assertIsInstance(processor_fast.image_processor ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase_ : List[Any] = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' )
lowercase_ : Any = self.get_image_processor(do_normalize=__UpperCamelCase ,padding_value=1.0 )
lowercase_ : Any = CLIPSegProcessor.from_pretrained(
self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCamelCase ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,__UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : Dict = self.get_image_processor()
lowercase_ : List[str] = self.get_tokenizer()
lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : List[Any] = self.prepare_image_inputs()
lowercase_ : str = image_processor(__UpperCamelCase ,return_tensors='np' )
lowercase_ : Union[str, Any] = processor(images=__UpperCamelCase ,return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Dict = self.get_image_processor()
lowercase_ : List[Any] = self.get_tokenizer()
lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : Dict = 'lower newer'
lowercase_ : Any = processor(text=__UpperCamelCase )
lowercase_ : int = tokenizer(__UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : str = self.get_image_processor()
lowercase_ : str = self.get_tokenizer()
lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : List[Any] = 'lower newer'
lowercase_ : str = self.prepare_image_inputs()
lowercase_ : Optional[int] = processor(text=__UpperCamelCase ,images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__UpperCamelCase ):
processor()
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : Tuple = self.get_image_processor()
lowercase_ : Optional[Any] = self.get_tokenizer()
lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : Optional[int] = self.prepare_image_inputs()
lowercase_ : Optional[Any] = self.prepare_image_inputs()
lowercase_ : int = processor(images=__UpperCamelCase ,visual_prompt=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__UpperCamelCase ):
processor()
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : List[str] = self.get_image_processor()
lowercase_ : Optional[Any] = self.get_tokenizer()
lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase_ : List[str] = processor.batch_decode(__UpperCamelCase )
lowercase_ : Optional[Any] = tokenizer.batch_decode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        '''simple docstring'''
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
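# Shape sketch for create_token_type_ids_from_sequences above: for a pair
# (A, B) with len(A) == 2 and len(B) == 3 the segment ids come out as
#
#     [0, 0, 0, 0, 1, 1, 1, 1]   # [CLS] A [SEP] -> 0s, B [SEP] -> 1s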
'''simple docstring'''
import math
def perfect_square(num: int) -> bool:
    return math.sqrt(num) * math.sqrt(num) == num
def perfect_square_binary_search(n: int) -> bool:
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
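# Contrast between the two checks above: the sqrt-based variant is exposed to
# floating point rounding for very large inputs, while the binary search stays
# in exact integer arithmetic:
#
#     assert perfect_square_binary_search(49)
#     assert not perfect_square_binary_search(50)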
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        vocab = ['▁This', '▁is', '▁a', '▁t', 'est']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['monolingual_vocab_file'])
        with open(self.monolingual_vocab_file, 'w', encoding='utf-8') as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = 'This is a là test'
        output_text = 'This is a<unk><unk> test'
        return input_text, output_text
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = 'This is a là test'
        bpe_tokens = '▁This ▁is ▁a ▁l à ▁t est'.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('''unexpected data type: ''', dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print('''total reduced size: ''', total_reduced_size / 1024 / 1024 / 1024, '''GB''')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    new_model = '''optimized_''' + model_file_name
    new_model = os.path.join(model_file_folder, new_model)
    onnx.save(model, new_model)
    return new_model
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
# Find Square Root of 5
print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
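# The eval()/sympy.diff() trick above assumes `func` is an expression in a
# variable literally named x, e.g.:
#
#     newton_raphson("x**3 - 1", 2)   # -> approximately 1.0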
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def _lowercase ( lowercase__ ):
__lowerCAmelCase : str = []
__lowerCAmelCase : List[Any] = []
__lowerCAmelCase : str = []
for rt in rc.restypes:
__lowerCAmelCase : List[Any] = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
__lowerCAmelCase : List[str] = {name: i for i, name in enumerate(lowercase__ )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 1_4 )
restype_atomaa_to_atomaa_list.append([0] * 3_7 )
restype_atomaa_mask_list.append([0.0] * 1_4 )
__lowerCAmelCase : List[Any] = torch.tensor(
lowercase__ , dtype=torch.intaa , device=protein['''aatype'''].device , )
__lowerCAmelCase : Optional[Any] = torch.tensor(
lowercase__ , dtype=torch.intaa , device=protein['''aatype'''].device , )
__lowerCAmelCase : Tuple = torch.tensor(
lowercase__ , dtype=torch.floataa , device=protein['''aatype'''].device , )
__lowerCAmelCase : List[Any] = protein['''aatype'''].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
__lowerCAmelCase : Any = restype_atomaa_to_atomaa[protein_aatype]
__lowerCAmelCase : Union[str, Any] = restype_atomaa_mask[protein_aatype]
__lowerCAmelCase : int = residx_atomaa_mask
__lowerCAmelCase : List[str] = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
__lowerCAmelCase : int = restype_atomaa_to_atomaa[protein_aatype]
__lowerCAmelCase : Union[str, Any] = residx_atomaa_to_atomaa.long()
# create the corresponding mask
__lowerCAmelCase : str = torch.zeros([2_1, 3_7] , dtype=torch.floataa , device=protein['''aatype'''].device )
for restype, restype_letter in enumerate(rc.restypes ):
__lowerCAmelCase : Optional[int] = rc.restype_atoa[restype_letter]
__lowerCAmelCase : Optional[Any] = rc.residue_atoms[restype_name]
for atom_name in atom_names:
__lowerCAmelCase : str = rc.atom_order[atom_name]
__lowerCAmelCase : List[Any] = 1
__lowerCAmelCase : Union[str, Any] = restype_atomaa_mask[protein_aatype]
__lowerCAmelCase : Any = residx_atomaa_mask
return protein
def _lowercase ( lowercase__ ):
__lowerCAmelCase : Dict = tree_map(lambda lowercase__ : torch.tensor(lowercase__ , device=batch['''aatype'''].device ) , lowercase__ , np.ndarray )
__lowerCAmelCase : Tuple = tensor_tree_map(lambda lowercase__ : np.array(lowercase__ ) , make_atomaa_masks(lowercase__ ) )
return out
"""simple docstring"""
def multiplicative_persistence(num: int) -> int:
    '''simple docstring'''
    if not isinstance(num, int):
        raise ValueError("""multiplicative_persistence() only accepts integral values""")
    if num < 0:
        raise ValueError("""multiplicative_persistence() does not accept negative values""")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps
def additive_persistence(num: int) -> int:
    '''simple docstring'''
    if not isinstance(num, int):
        raise ValueError("""additive_persistence() only accepts integral values""")
    if num < 0:
        raise ValueError("""additive_persistence() does not accept negative values""")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
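# Hand-checked examples for the two helpers above:
# 217 -> 2*1*7 = 14 -> 1*4 = 4, i.e. 2 multiplicative steps;
# 199 -> 1+9+9 = 19 -> 1+9 = 10 -> 1+0 = 1, i.e. 3 additive steps.
#
#     assert multiplicative_persistence(217) == 2
#     assert additive_persistence(199) == 3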
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
UpperCAmelCase : Optional[int] = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        """simple docstring"""
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        """simple docstring"""
        super().__init__(config)
        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0
    def set_regression_threshold(self, threshold):
        """simple docstring"""
        self.regression_threshold = threshold
    def set_patience(self, patience):
        """simple docstring"""
        self.patience = patience
    def reset_stats(self):
        """simple docstring"""
        self.inference_instances_num = 0
        self.inference_layers_num = 0
    def log_stats(self):
        """simple docstring"""
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            F'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
            F''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]=False , ):
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""")
elif input_ids is not None:
lowercase_ = input_ids.size()
elif inputs_embeds is not None:
lowercase_ = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""")
lowercase_ = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowercase_ = torch.ones(lowerCAmelCase_ , device=lowerCAmelCase_)
if token_type_ids is None:
lowercase_ = torch.zeros(lowerCAmelCase_ , dtype=torch.long , device=lowerCAmelCase_)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowercase_ = self.get_extended_attention_mask(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_)
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
lowercase_ , lowercase_ , lowercase_ = encoder_hidden_states.size()
lowercase_ = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
lowercase_ = torch.ones(lowerCAmelCase_ , device=lowerCAmelCase_)
lowercase_ = self.invert_attention_mask(lowerCAmelCase_)
else:
lowercase_ = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowercase_ = self.get_head_mask(lowerCAmelCase_ , self.config.num_hidden_layers)
lowercase_ = self.embeddings(
input_ids=lowerCAmelCase_ , position_ids=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , inputs_embeds=lowerCAmelCase_)
lowercase_ = embedding_output
if self.training:
lowercase_ = []
for i in range(self.config.num_hidden_layers):
lowercase_ = self.encoder.adaptive_forward(
lowerCAmelCase_ , current_layer=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_)
lowercase_ = self.pooler(lowerCAmelCase_)
lowercase_ = output_layers[i](output_dropout(lowerCAmelCase_))
res.append(lowerCAmelCase_)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)

                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
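# Minimal usage sketch (added for illustration; the checkpoint name and the
# input tensors are placeholders, not from the original file). With
# patience-based early exit, inference stops once `patience` consecutive
# internal classifiers agree on the prediction:
#
#   model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased", num_labels=2)
#   model.bert.set_patience(3)
#   model.eval()
#   with torch.no_grad():
#       logits = model(input_ids=input_ids, attention_mask=attention_mask)[0]
#   model.bert.log_stats()  # reports average layers used and the implied speed-up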
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    """Count how many n < limit admit exactly ten solutions of
    x**2 - y**2 - z**2 = n with x, y, z in arithmetic progression.

    Writing x = y + d and z = y - d gives n = y * (4 * d - y), so every
    solution corresponds to a divisor y (``first_term``) of n for which
    d = (n / y + y) / 4 is a positive integer with y > d (so z > 0) and
    y < 4 * d (so n > 0).
    """
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        # the inner loop only visits multiples of first_term, so the
        # integer division below is exact
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n // first_term
            if common_difference % 4:  # n/y + y must be a multiple of 4 for d to be an integer
                continue
            else:
                common_difference //= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and y > d, also y < 4 * d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(f"{solution() = }")
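    # Sanity check (added for illustration): the problem statement gives
    # n = 1155 as the least value with exactly ten solutions; one of them is
    # the arithmetic progression x, y, z = 134, 105, 76 (difference 29).
    assert 134**2 - 105**2 - 76**2 == 1155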
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.encodec')
MAPPING_QUANTIZER = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
MAPPING_ENCODER = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
MAPPING_ENCODER_48K = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
MAPPING_DECODER = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
MAPPING_DECODER_48K = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
MAPPING_24K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
MAPPING_48K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_model, key, value, full_name, weight_type):
    # Walk the attribute path to the target parameter/module.
    hf_pointer = hf_model
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}' )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith('.*'):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split('.*.')
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    # NOTE: the original line read `model_name == "encodec_24khz" or "encodec_32khz"`,
    # which is always truthy; the membership test below is the intended check.
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f'Unsupported model: {model_name}' )

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f'{name} was ignored' )
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split('.*.')
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith('embed' ) and name.endswith('embed_avg' ):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split('.')[-2]
                    mapped_key = mapped_key.replace('*', layer_index)
                if "weight_g" in name:
                    weight_type = 'weight_g'
                elif "weight_v" in name:
                    weight_type = 'weight_v'
                elif "weight_ih_l0" in name:
                    weight_type = 'weight_ih_l0'
                elif "weight_hh_l0" in name:
                    weight_type = 'weight_hh_l0'
                elif "bias_ih_l0" in name:
                    weight_type = 'bias_ih_l0'
                elif "bias_hh_l0" in name:
                    weight_type = 'bias_hh_l0'
                elif "weight_ih_l1" in name:
                    weight_type = 'weight_ih_l1'
                elif "weight_hh_l1" in name:
                    weight_type = 'weight_hh_l1'
                elif "bias_ih_l1" in name:
                    weight_type = 'bias_ih_l1'
                elif "bias_hh_l1" in name:
                    weight_type = 'bias_hh_l1'
                elif "bias" in name:
                    weight_type = 'bias'
                elif "weight" in name:
                    weight_type = 'weight'
                elif "running_mean" in name:
                    weight_type = 'running_mean'
                elif "running_var" in name:
                    weight_type = 'running_var'
                elif "num_batches_tracked" in name:
                    weight_type = 'num_batches_tracked'
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'Unused weights: {unused_weights}' )
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    # NOTE: the config attribute names below are restored on a best-effort
    # basis from the values left in the obfuscated original; verify them
    # against EncodecConfig before relying on this script.
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = 'time_group_norm'
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f'Unknown model name: {model_name}' )

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint['best_state']
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print('Pushing to the hub...')
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
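# Example invocation (added for illustration; the script file name and local
# paths are placeholders):
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz_converted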
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a torch image batch with values in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch with values in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
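# Minimal usage sketch (added for illustration; the tensor is synthetic):
#   import torch
#   batch = torch.rand(2, 3, 64, 64) * 2 - 1  # values in [-1, 1]
#   pil_images = pt_to_pil(batch)
#   pil_images[0].save("sample.png")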
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
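# Minimal usage sketch (added for illustration; requires a GPU and network
# access to download the BAAI/AltDiffusion weights):
#   pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion").to("cuda")
#   image = pipe("A painting of a squirrel eating a burger").images[0]
#   image.save("squirrel.png")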
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead", "BertLMPredictionHead", REFERENCE_CODE
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        link_unchanged_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
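# Illustration (added; not part of the original file) of what the lazy
# pattern buys: importing the package stays cheap, and the torch-backed
# submodule is only loaded on first attribute access.
#   import transformers.models.dpt as dpt   # fast, no torch import yet
#   model_cls = dpt.DPTForDepthEstimation   # modeling_dpt is imported here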
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/validation `DataLoader`s for GLUE MRPC with a
    bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding='longest',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`'
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])

    metric = evaluate.load('glue', 'mrpc')

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''', eval_metric)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision',
        type=str,
        default=None,
        choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.',
    )
    # New Code #
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help='The number of minibatches to be ran before gradients are accumulated.',
    )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
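# Example invocation (added for illustration; the script file name is a
# placeholder). With 4 accumulation steps and a per-device batch size of 16,
# each optimizer step sees an effective batch of 64:
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4 --mixed_precision fp16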
'''simple docstring'''
def split(string: str, separator: str = " ") -> list:
    """Split ``string`` on ``separator`` without using str.split.

    >>> split("apple#banana#cherry#orange", separator="#")
    ['apple', 'banana', 'cherry', 'orange']
    """
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
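    # Worked example (added for illustration): the default separator behaves
    # like str.split for simple single-space input.
    assert split("Hello every one") == ["Hello", "every", "one"]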
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
    _import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convnext'] = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convnext'] = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset('''csv''', data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding='''max_length'''
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding='''max_length''',
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            ''' --overwrite_output_dir to overcome.''' )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO,
    )
    logger.info(
        f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, """
        f"""16-bits training: {training_args.fp16}""" )
    logger.info(f"""Training/evaluation parameters {training_args}""" )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task='''text-classification''',
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool('''.bin''' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, '''eval_results.txt''')
        with open(output_eval_file, '''w''' ) as writer:
            logger.info('''***** Eval results *****''' )
            for key, value in result.items():
                logger.info(f""" {key} = {value}""" )
                writer.write(f"""{key} = {value}\n""" )
            results.update(result)

    return results
if __name__ == "__main__":
main()
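# Illustrative invocation (added sketch; the CSV paths below are placeholders
# rather than files shipped with this script, and any sequence-classification
# checkpoint can stand in for bert-base-cased):
#
#   python run_tf_text_classification.py \
#       --train_file train.csv \
#       --dev_file dev.csv \
#       --test_file test.csv \
#       --label_column_id 0 \
#       --model_name_or_path bert-base-cased \
#       --output_dir ./tf_clf_output \
#       --do_train --do_eval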
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Run the given number of Koch snowflake iteration steps."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each line segment by four shorter segments forming the Koch bump."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counter-clockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the snowflake with equal axis scaling."""
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
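    # Added sanity check (illustrative): one iteration step replaces each of the
    # 3 initial segments with 4, so the 4 starting points grow to 3 * 4 + 1 = 13.
    assert len(iteration_step(INITIAL_VECTORS)) == 13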
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
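# Usage note (added; the module names below mirror the _import_structure keys
# above): with the _LazyModule registered, importing this package stays cheap,
# and each framework-specific submodule is only loaded on first attribute access:
#
#   from transformers.models.blenderbot import BlenderbotConfig  # config only
#   from transformers.models.blenderbot import BlenderbotModel   # now the torch-backed module loads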
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """Zero-shot audio classification pipeline: scores an audio input against a set of candidate text labels."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
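# Example usage (added sketch; "laion/clap-htsat-unfused" is one public CLAP
# checkpoint and "dog_bark.wav" is a placeholder audio file):
#
#   from transformers import pipeline
#   classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])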
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    """Image processor that resizes, center-crops, rescales and normalizes images."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image so that its shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by the given scale factor."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]] = None):
        """Convert model logits into per-pixel semantic segmentation maps."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
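# Sketch of intended use (added; the class name above is a reconstruction, and
# the COCO URL is just a convenient public test image):
#
#   from PIL import Image
#   import requests
#   image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
#   image_processor = MobileNetV2ImageProcessor()
#   inputs = image_processor(images=image, return_tensors="pt")
#   # resize (shortest edge 256) + center crop -> inputs["pixel_values"].shape == (1, 3, 224, 224)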
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
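# Worked example (added): with the 24 kHz defaults above, hop_length =
# prod([8, 5, 4, 2]) = 320, so frame_rate = ceil(24000 / 320) = 75 frames per
# second, and num_quantizers = int(1000 * 24.0 // (75 * 10)) = 32 codebooks.
#
#   config = EncodecConfig()
#   assert config.frame_rate == 75 and config.num_quantizers == 32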
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
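    # Added worked example: for "12345678Z", 12345678 % 23 == 14 and
    # LOOKUP_LETTERS[14] == "Z", so the id validates.
    assert is_spain_national_id("12345678Z")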
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
from math import pow, sqrt


def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0."
        )
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0."
        )
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0."
        )
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0."
        )
    )
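# Added worked example (molar masses are rounded textbook values): comparing
# hydrogen (~2.016 g/mol) with oxygen (~31.998 g/mol), Graham's law gives
# effusion_ratio(2.016, 31.998) == round(sqrt(31.998 / 2.016), 6), roughly 3.98,
# i.e. hydrogen effuses about four times as fast as oxygen.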
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
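# Added worked example: for positive_integer = 2, sqrt(4 * 2 + 1) / 2 + 1 / 2 = 2
# and log2(2) == 1.0 is an integer, so check_partition_perfect(2) is True; for
# positive_integer = 3, log2(sqrt(13) / 2 + 1 / 2) ~= 1.2034 is not an integer,
# so check_partition_perfect(3) is False.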
if __name__ == "__main__":
print(F'{solution() = }')
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
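# Example (added sketch): the defaults above describe a ViT-MAE-base sized model.
# With image_size=224 and patch_size=16 the encoder sees (224 // 16) ** 2 = 196
# patches, of which mask_ratio=0.75 (147 patches) are masked during pretraining.
#
#   configuration = ViTMAEConfig()  # facebook/vit-mae-base style architecture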
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(rag_example_args, processing_args, index_hnsw_args):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
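# Illustrative invocation (added; the csv path is a placeholder tab-separated
# file with "title" and "text" columns, and the script filename is assumed):
#
#   python use_own_knowledge_dataset.py \
#       --csv_path path/to/my_knowledge_dataset.csv \
#       --output_dir path/to/my_knowledge_dataset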
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()

    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
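# Added worked examples:
#   camelcase_to_snakecase("SquadV2")              -> "squad_v2"
#   snakecase_to_camelcase("squad_v2")             -> "SquadV2"
#   filename_prefix_for_split("SquadV2", "train")  -> "squad_v2-train"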
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ibert'] = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trocr'] = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(4_2)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"
@require_torch
class __A( snake_case__ ):
def SCREAMING_SNAKE_CASE_ ( self , _snake_case=False , _snake_case=None , _snake_case=True , _snake_case=True , _snake_case=True , _snake_case=True , ) -> List[Any]:
'''simple docstring'''
__a = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=UpperCAmelCase_ , num_train_epochs=1 , distributed=UpperCAmelCase_ , extra_args_str=UpperCAmelCase_ , predict_with_generate=UpperCAmelCase_ , do_train=UpperCAmelCase_ , do_eval=UpperCAmelCase_ , do_predict=UpperCAmelCase_ , )
__a = TrainerState.load_from_json(os.path.join(UpperCAmelCase_ , '''trainer_state.json''' ) ).log_history
if not do_eval:
return
__a = [log for log in logs if "eval_loss" in log.keys()]
__a = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
__a = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , UpperCAmelCase_ )
assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seqaseq_no_dist(self):
        self.run_seqaseq_quick()

    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seqaseq_dp(self):
        self.run_seqaseq_quick(distributed=False)

    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seqaseq_ddp(self):
        self.run_seqaseq_quick(distributed=True)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_sharded_ddp(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_sharded_ddp_fp16(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_fully_sharded_ddp(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_fully_sharded_ddp_fp16(self):
        self.run_seqaseq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )

    @require_apex
    @require_torch_gpu
    def test_run_seqaseq_apex(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
    @slow
    def test_run_seqaseq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seqaseq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are `nn.Embedding`, which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()

        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
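# run_trainer above either shells out through `torch.distributed.run` or calls
# the example script's main() in-process by patching sys.argv. A minimal
# self-contained sketch of the in-process pattern; `demo_main` and the flags
# below are illustrative stand-ins, not part of the test suite.
import sys
from unittest.mock import patch


def demo_main():
    # a real example script would parse sys.argv here (e.g. via argparse)
    print(sys.argv)


args = ["--model_name_or_path", "dummy", "--output_dir", "/tmp/out"]
testargs = ["run_translation.py"] + args
with patch.object(sys, "argv", testargs):
    demo_main()  # sees the patched argv exactly as if launched from a shell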
| 364
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
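# Hedged usage sketch for the config above (requires `transformers` to be
# installed and recent enough to ship TableTransformerConfig); the parameter
# values are illustrative defaults, not a recommendation.
from transformers import TableTransformerConfig

config = TableTransformerConfig(d_model=256, encoder_layers=6, decoder_layers=6)
print(config.hidden_size)          # 256 -- aliased to d_model via the properties above
print(config.num_attention_heads)  # 8   -- aliased to encoder_attention_heads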
| 33
| 0
|
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
mock_tokenizer_config = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
zh_code = '''>>zh<<'''
ORG_NAME = '''Helsinki-NLP/'''

if is_torch_available():
    FRAMEWORK = '''pt'''
elif is_tf_available():
    FRAMEWORK = '''tf'''
else:
    FRAMEWORK = '''jax'''
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCAmelCase__ : List[str] = {"""input_ids""": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCAmelCase__,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_tokenizer_integration_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
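# Sketch of the source/target round trip the last test exercises. The
# checkpoint name comes straight from the test above; running this downloads
# it from the Hub, and the printed ids are not asserted here.
from transformers import MarianTokenizer

tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")
source_ids = tokenizer("Tämä on testi").input_ids                # encoded with the source spm
target_ids = tokenizer(text_target="This is a test").input_ids   # encoded with the target spm
print(tokenizer.decode(target_ids, skip_special_tokens=True))    # "This is a test"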
| 37
|
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj:
    """Set all the module's components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
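# Minimal usage sketch, assuming this file is importable as
# `datasets.utils.patching` (its home in the `datasets` library); the patched
# target and the fake replacement below are illustrative.
import os
import sys

from datasets.utils.patching import patch_submodule


def fake_join(*parts):
    return "!!" + "/".join(parts)


this_module = sys.modules[__name__]  # any object whose globals bind `os`
with patch_submodule(this_module, "os.path.join", fake_join):
    # inside the context, `os` resolved through this module is a
    # _PatchedModuleObj whose path.join is the replacement
    print(os.path.join("a", "b"))  # "!!a/b"
print(os.path.join("a", "b"))      # original behavior restored on exit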
| 115
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
"""simple docstring"""
super().__init__(
pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a , )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = embedding_size
UpperCAmelCase = head_ratio
UpperCAmelCase = conv_kernel_size
UpperCAmelCase = num_groups
UpperCAmelCase = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
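# Hedged usage sketch for the two classes above (requires `transformers`);
# the import path and task string mirror the library's conventions, but the
# printed values are only illustrative.
from transformers.models.convbert.configuration_convbert import ConvBertConfig, ConvBertOnnxConfig

config = ConvBertConfig()  # defaults mirror YituTech/conv-bert-base
onnx_config = ConvBertOnnxConfig(config, task="multiple-choice")
print(onnx_config.inputs)  # OrderedDict with a batch/choice/sequence dynamic axis per input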
| 369
|
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of a diamond of n rows."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of a diamond of n rows."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print a complete diamond, or a gentle reminder for non-positive n."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(r"| /\ | |- | |- |--| |\ /| |-")
print(r"|/ \| |- |_ |_ |__| | \/ | |_")
__magic_name__ = 1
while K:
__magic_name__ = int(input("enter the number and , and see the magic : "))
print()
pretty_print(user_number)
__magic_name__ = int(input("press 0 to exit... and 1 to continue..."))
print("Good Bye...")
| 152
| 0
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
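# Hedged usage sketch (requires `transformers`); the defaults above appear to
# correspond to the smallest MiT-b0 encoder variant, but treat that mapping as
# an assumption rather than documentation.
from transformers import SegformerConfig

config = SegformerConfig()
print(config.depths)        # [2, 2, 2, 2]
print(config.hidden_sizes)  # [32, 64, 160, 256]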
| 114
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")


if __name__ == "__main__":
    main()
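# The three-dataclass split above is the standard HfArgumentParser pattern:
# each dataclass becomes a group of CLI flags. A minimal sketch with an
# illustrative dataclass (not part of the script above):
from dataclasses import dataclass, field

from transformers import HfArgumentParser, TrainingArguments


@dataclass
class ExampleArguments:
    language: str = field(default="en", metadata={"help": "Evaluation language."})


parser = HfArgumentParser((ExampleArguments, TrainingArguments))
example_args, training_args = parser.parse_args_into_dataclasses(
    args=["--output_dir", "/tmp/xnli-demo", "--language", "de"]
)
print(example_args.language, training_args.output_dir)  # de /tmp/xnli-demo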
| 114
| 1
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        link_unchanged_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
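# A minimal illustration (not from the test file) of the marker these tests
# validate: the annotated class body must stay identical, modulo the
# `Bert->TestModel` substitution, to the class it was copied from. The class
# body is elided here.
from torch import nn


# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel
class TestModelLMPredictionHead(nn.Module):
    ...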
| 356
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def rename_key(state_dict, old, new):
    '''simple docstring'''
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('''backbone.0.body''', '''backbone.conv_encoder.model''')
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    '''simple docstring'''
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''')
        in_proj_bias_cross_attn = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.weight'''] = in_proj_weight_cross_attn[:256, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.bias'''] = in_proj_bias_cross_attn[:256]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.weight'''] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.bias'''] = in_proj_bias_cross_attn[256:512]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.weight'''] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.bias'''] = in_proj_bias_cross_attn[-256:]
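# Added illustration (hypothetical helper, not used by the conversion): how PyTorch's
# fused MultiheadAttention in_proj matrix maps onto separate q/k/v projections, assuming
# hidden_size=256 to match the slicing above. Relies on the torch import at the top.
def _demo_split_fused_qkv(hidden_size=256):
    fused_weight = torch.randn(3 * hidden_size, hidden_size)  # W_q, W_k, W_v stacked row-wise
    q_w = fused_weight[:hidden_size, :]
    k_w = fused_weight[hidden_size : 2 * hidden_size, :]
    v_w = fused_weight[-hidden_size:, :]
    # the three slices together reconstruct the fused matrix exactly
    assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), fused_weight)
    return q_w, k_w, v_w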
def resize(image, checkpoint_url):
    '''simple docstring'''
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if '''detection''' in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    '''simple docstring'''
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
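# Worked example of the aspect-ratio-preserving resize above (hypothetical helper, not
# used by the conversion): the longest side is scaled to the target, the other follows.
def _demo_resize_scale(width=1200, height=800, target_max_size=800):
    scale = target_max_size / max(width, height)
    return int(round(scale * width)), int(round(scale * height))  # -> (800, 533)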
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    '''simple docstring'''
    logger.info('''Converting model...''')
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''')
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = '''model.'''
    for key in state_dict.copy().keys():
        if not key.startswith('''class_labels_classifier''') and not key.startswith('''bbox_predictor'''):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='''resnet18''', mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: '''table''', 1: '''table rotated'''}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: '''table''',
            1: '''table column''',
            2: '''table row''',
            3: '''table column header''',
            4: '''table projected row header''',
            5: '''table spanning cell''',
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format='''coco_detection''', max_size=800 if '''detection''' in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
    file_path = hf_hub_download(repo_id='''nielsr/example-pdf''', repo_type='''dataset''', filename=filename)
    image = Image.open(file_path).convert('''RGB''')
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1E-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1E-4)
    print('''Looks ok!''')
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info('''Pushing model to the hub...''')
        model_name = (
            '''microsoft/table-transformer-detection'''
            if '''detection''' in checkpoint_url
            else '''microsoft/table-transformer-structure-recognition'''
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__a = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
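# Example invocation (added for illustration; the script filename is an assumption
# following the repo's conversion-script naming convention, not taken from this file):
#   python convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection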
| 235
| 0
|
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        '''simple docstring'''
        self.model_tester = FlaxBertModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        model = FlaxBertModel.from_pretrained('''bert-base-cased''')
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 6
|
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
A : int = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class __A( unittest.TestCase ):
    @classmethod
    def setUpClass(cls):
        '''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
        '''simple docstring'''
        try:
            delete_repo(token=cls._token , repo_id='''test-model-flax''' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
        except HTTPError:
            pass
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__a = FlaxBertModel(_snake_case )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_snake_case , repo_id='''test-model-flax''' , push_to_hub=_snake_case , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__a = FlaxBertModel(_snake_case )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_snake_case , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_snake_case , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )
def check_models_equal(model_1, model_2) -> bool:
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
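# Added sanity-check note (illustrative): comparing a model's parameters against
# themselves must return True, since every elementwise difference sums to 0, e.g.
#   check_models_equal(model, model) -> True for the tiny BertConfig models built below.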
@require_flax
class __A( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
__a = FlaxBertModel(_snake_case )
__a = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_snake_case , _snake_case ) )
with self.assertRaises(_snake_case ):
__a = FlaxBertModel.from_pretrained(_snake_case )
__a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case )
self.assertTrue(check_models_equal(_snake_case , _snake_case ) )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
__a = FlaxBertModel(_snake_case )
__a = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_snake_case , _snake_case ) , max_shard_size='''10KB''' )
with self.assertRaises(_snake_case ):
__a = FlaxBertModel.from_pretrained(_snake_case )
__a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case )
self.assertTrue(check_models_equal(_snake_case , _snake_case ) )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = '''bert'''
__a = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_snake_case ):
__a = FlaxBertModel.from_pretrained(_snake_case )
__a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case )
self.assertIsNotNone(_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = '''bert'''
__a = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_snake_case ):
__a = FlaxBertModel.from_pretrained(_snake_case )
__a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case )
self.assertIsNotNone(_snake_case )
| 6
| 1
|
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    '''simple docstring'''
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f'SPARK_PARTITION_ID() = {part_id}').collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f'{part_id}_{row_idx}', row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''').appName('''pyspark''').getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
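# The partition arithmetic above, spelled out (hypothetical helper, not used by the
# tests): with 8-byte rows and a 16-byte shard budget, 2 rows fit per partition, so
# 100 rows need ceil(100 / 2) = 50 partitions.
def _expected_num_partitions(num_rows, row_size_bytes, max_shard_size):
    rows_per_partition = max(1, max_shard_size // row_size_bytes)
    return -(-num_rows // rows_per_partition)  # ceiling division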
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''').appName('''pyspark''').getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''').appName('''pyspark''').getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f'0_{i}'
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''').appName('''pyspark''').getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch('''numpy.random.Generator''') as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''').appName('''pyspark''').getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    '''simple docstring'''
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''').appName('''pyspark''').getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 179
|
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 179
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : str = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
__lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 156
|
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    """simple docstring"""
    feature_extractor_class = '''EncodecFeatureExtractor'''
    tokenizer_class = ('''T5Tokenizer''', '''T5TokenizerFast''')
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop('''audio''', None)
        sampling_rate = kwargs.pop('''sampling_rate''', None)
        text = kwargs.pop('''text''', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''')
        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs['''input_values'''] = audio_inputs['''input_values''']
            if "padding_mask" in audio_inputs:
                inputs['''padding_mask'''] = audio_inputs['''padding_mask''']
            return inputs
    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop('''audio''', None)
        padding_mask = kwargs.pop('''padding_mask''', None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    def _decode_audio(self, audio_values, padding_mask=None):
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values)
        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), '''constant''', constant_values=padding_value)
        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
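# Added sketch of the padding-mask slicing performed in `_decode_audio` above
# (hypothetical helper; assumes the feature extractor's padding_value is 0):
# positions whose mask differs from the padding value survive, the rest are dropped.
def _demo_strip_padding():
    audio = np.array([0.1, 0.2, 0.3, 0.4])   # one mono channel, seq_len = 4
    padding_mask = np.array([1, 1, 1, 0])    # last step is padding
    return audio[padding_mask != 0]          # -> array([0.1, 0.2, 0.3])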
| 156
| 1
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , type_sequence_label_size=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , num_labels=3 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config(self):
        '''simple docstring'''
        return ConvNextConfig(
            num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
    def get_config(self):
        '''simple docstring'''
        return UperNetConfig(
            backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , num_labels=self.num_labels , )
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        '''simple docstring'''
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self , config_class=UperNetConfig , has_text_modality=False , hidden_size=37 )
    def test_config(self):
        '''simple docstring'''
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        '''simple docstring'''
        return
    def test_forward_signature(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_for_semantic_segmentation(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def _snake_case ( self )->Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def _snake_case ( self )->List[str]:
'''simple docstring'''
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
pass
    def test_hidden_states_output(self):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip(reason='''UperNet does not have tied weights''' )
def _snake_case ( self )->List[str]:
'''simple docstring'''
pass
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
    image = Image.open(filepath).convert('''RGB''')
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    def test_inference_swin_backbone(self):
        '''simple docstring'''
        processor = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''')
        model = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''').to(torch_device)
        image = prepare_img()
        inputs = processor(images=image , return_tensors='''pt''' ).to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4 ) )
    def test_inference_convnext_backbone(self):
        '''simple docstring'''
        processor = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''')
        model = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''').to(torch_device)
        image = prepare_img()
        inputs = processor(images=image , return_tensors='''pt''' ).to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4 ) )
| 352
|
def cramers_rule_2x2(equation1, equation2):
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError('''Please enter a valid equation.''')
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('''Both a & b of two equations can\'t be zero.''')
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('''Infinite solutions. (Consistent system)''')
        else:
            raise ValueError('''No solution. (Inconsistent system)''')
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution x = y = 0 (Consistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
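if __name__ == "__main__":
    # Added worked example (verified by substitution): 2x + 3y = 6 and 4x + 9y = 15
    # have determinant 2*9 - 4*3 = 6, giving the unique solution x = 9/6 = 1.5, y = 6/6 = 1.0.
    assert cramers_rule_2x2([2, 3, 6], [4, 9, 15]) == (1.5, 1.0)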
| 65
| 0
|
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
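# Added illustration (hypothetical helper, not used by the checker): rows are L2-normalized,
# so the matmul yields pairwise cosine similarities — 1 for identical rows, 0 for orthogonal.
def _demo_cosine_distance():
    a = jnp.array([[1.0, 0.0], [0.0, 2.0]])
    sims = jax_cosine_distance(a, a)  # (2, 2): diagonal ~ 1.0, off-diagonal ~ 0.0
    return sims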
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        '''simple docstring'''
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim , use_bias=False , dtype=self.dtype )
        self.concept_embeds = self.param("""concept_embeds""" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
        self.special_care_embeds = self.param(
            """special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
        self.concept_embeds_weights = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (17,) )
        self.special_care_embeds_weights = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,) )
    def __call__(self, clip_input):
        '''simple docstring'''
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = jax_cosine_distance(image_embeds , self.special_care_embeds )
        cos_dist = jax_cosine_distance(image_embeds , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores , 3 )
        is_special_care = jnp.any(special_scores > 0 , axis=1 , keepdims=True )
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores , 3 )
        has_nsfw_concepts = jnp.any(concept_scores > 0 , axis=1 )
        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = """clip_input"""
    module_class = FlaxStableDiffusionSafetyCheckerModule
    def __init__(self, config, input_shape=None, seed=0, dtype=jnp.float32, _do_init=True, **kwargs):
        '''simple docstring'''
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config , dtype=dtype , **kwargs )
        super().__init__(config , module , input_shape=input_shape , seed=seed , dtype=dtype , _do_init=_do_init )
    def init_weights(self, rng, input_shape, params=None) -> FrozenDict:
        '''simple docstring'''
        # init input tensor
        clip_input = jax.random.normal(rng , input_shape )
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"""params""": params_rng, """dropout""": dropout_rng}
        random_params = self.module.init(rngs , clip_input )["""params"""]
        return random_params
    def __call__(self, clip_input, params=None):
        '''simple docstring'''
        clip_input = jnp.transpose(clip_input , (0, 2, 3, 1) )
        return self.module.apply(
            {"""params""": params or self.params} , jnp.array(clip_input , dtype=jnp.float32 ) , rngs={} , )
| 246
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_DOCS = '''docs/source/en'''
REPO_PATH = '''.'''
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, """r""", encoding="""utf-8""", newline="""\n""" ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCamelCase__ : Dict = '''Model|Encoder|Decoder|ForConditionalGeneration'''
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
_re_flax_models = re.compile(r'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    matches = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""", identifier )
    return [m.group(0 ) for m in matches]
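# For illustration: camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"], which is
# what lets the lookup below strip one trailing word at a time from a class name.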
def _center_text(text, width):
    text_length = 2 if text == """✅""" or text == """❌""" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("""Config""", """""" ) for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("""Tokenizer""" ):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("""TokenizerFast""" ):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = """""".join(camel_case_split(attr_name )[:-1] )
    # Let's build that table!
    model_names = list(model_name_to_config.keys() )
    model_names.sort(key=str.lower )
    columns = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c ) + 2 for c in columns]
    widths[0] = max([len(name ) for name in model_names] ) + 2
    # Build the table per se
    table = """|""" + """|""".join([_center_text(c, w ) for c, w in zip(columns, widths )] ) + """|\n"""
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n"
    check = {True: """✅""", False: """❌"""}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w ) for l, w in zip(line, widths )] ) + "|\n"
    return table
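# For illustration of the separator row built above: with widths [7, 6] it becomes
# "|:-----:|:----:|", which markdown renders as center-aligned columns.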
def check_model_table(overwrite=False):
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, """index.md""" ), start_prompt="""<!--This table is updated automatically from the auto modules""", end_prompt="""<!-- End table-->""", )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, """index.md""" ), """w""", encoding="""utf-8""", newline="""\n""" ) as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
        else:
            raise ValueError(
                """The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
lowerCamelCase__ : List[Any] = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 246
| 1
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_zero=False , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """inverse_scheduler""": inverse_scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)
    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)
    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)
    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()

        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
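# The integration tests above follow DiffEdit's three-step workflow:
# generate_mask -> invert -> inpaint with the inverted latents. A minimal sketch
# of that flow, condensed from the tests (prompts and strengths are illustrative,
# not part of the test suite):
def _diffedit_workflow_sketch(pipe, raw_image, generator):
    source_prompt = "a bowl of fruit"
    target_prompt = "a bowl of pears"
    # 1. Contrast source and target prompts to derive an edit mask.
    mask_image = pipe.generate_mask(
        image=raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator
    )
    # 2. Invert the image into latents so the edit can be re-synthesised from them.
    inv_latents = pipe.invert(
        prompt=source_prompt, image=raw_image, inpaint_strength=0.7, generator=generator
    ).latents
    # 3. Denoise towards the target prompt, editing only the masked region.
    return pipe(
        prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, negative_prompt=source_prompt, inpaint_strength=0.7, generator=generator,
    ).images[0]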
| 370
|
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" ,[2, -1] )
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
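# Note how the backend applies transparently: map_nested walks lists and dicts
# and applies add_one to every leaf. A minimal sketch of the same call outside
# pytest (assumes joblibspark is installed and a Spark session is available):
#
#     with parallel_backend("spark"):
#         assert map_nested(add_one, {"a": [1, 2]}, num_proc=2) == {"a": [2, 3]}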
| 27
| 0
|
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
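# Example invocation (script name matches the transformers conversion script;
# all three paths are hypothetical placeholders):
#
#     python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./mobilebert/mobilebert.ckpt \
#         --mobilebert_config_file ./mobilebert/config.json \
#         --pytorch_dump_path ./mobilebert/pytorch_model.bin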
| 131
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # A single random 3x30x400 image, converted to PIL as the processor expects.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)
        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)
        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)
        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)
        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
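# In short, the processor bundles tokenizer + image processor: text-only calls
# return input_ids/attention_mask, image-only calls return pixel_values, and a
# combined call returns all three. A minimal sketch (model id as used above):
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(text=[["cat", "nasa badge"]], images=image, return_tensors="np")
#     # -> dict with "input_ids", "attention_mask", "pixel_values"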
| 131
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the reduced fairseq vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the reduced fairseq vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of sub-word tokens to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
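# Typical usage (a sketch; requires the sentencepiece package and downloads the
# vinai/bartpho-syllable files listed in PRETRAINED_VOCAB_FILES_MAP above; the
# input sentence is an arbitrary example):
#
#     tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#     ids = tokenizer("xin chào")["input_ids"]
#     # SentencePiece pieces are remapped through the reduced fairseq vocab and
#     # wrapped as <s> ... </s> by build_inputs_with_special_tokens()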
| 152
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
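# The guarded try/except blocks above implement the standard transformers
# lazy-import pattern: a submodule is only registered in _import_structure when
# its optional dependency (here sentencepiece or tokenizers) is importable, and
# _LazyModule defers the real imports until an attribute is first accessed.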
| 152
| 1
|
def hamming(n_element: int) -> list:
    """Return a list of the first n_element Hamming numbers (2^i * 3^j * 5^k)."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("n_element should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
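# Example: hamming(10) -> [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]
# (the ten smallest numbers whose only prime factors are 2, 3 and 5).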
if __name__ == "__main__":
_snake_case = input("Enter the last number (nth term) of the Hamming Number Series: ")
print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
_snake_case = hamming(int(n))
print("-----------------------------------------------------")
print(f'''The list with nth numbers is: {hamming_numbers}''')
print("-----------------------------------------------------")
| 36
|
"""simple docstring"""
from __future__ import annotations
__A : List[Any] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
__A : str = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
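# All three implementations agree, e.g. next_greatest_element([2, 1, 3]) == [3, 3, -1]:
# 3 is the first later element greater than both 2 and 1, and nothing follows 3,
# so it maps to the -1 sentinel.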
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
__A : int = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
| 33
| 0
|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING

    @require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]

    def test_stop_sequence_stopping_criteria(self):
        prompt = "Hello I believe in"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slightly special:
        # it requires a BOS token to exist.
        # Special case for Pegasus, which will always append EOS, so it will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work when the prompt plus new tokens exceed the model's capacity
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_pipeline_length_setting_warning(self):
        prompt = "Hello world"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
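# Condensed from the assertions above, the pipeline's output contract (a sketch
# using the same tiny test checkpoint; real models generate different text):
#
#     generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
#     out = generator("Hello I believe in")
#     # -> [{"generated_text": "Hello I believe in ..."}]
#     # with num_return_sequences=n, each prompt instead yields a list of n such dicts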
| 358
|
def interpolation_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid division by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid division by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1
            )
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right
            )


def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
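# Worked example of the probe formula: searching for 9 in [1, 5, 9, 13] gives
# point = 0 + (9 - 1) * (3 - 0) // (13 - 1) = 24 // 12 = 2, and
# sorted_collection[2] == 9, so the item is found with a single probe.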
if __name__ == "__main__":
import sys
_A = 0
if debug == 1:
_A = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('''Sequence must be ascending sorted to apply interpolation search''')
_A = 67
_A = interpolation_search(collection, target)
if result is not None:
print(F'''{target} found at positions: {result}''')
else:
print('''Not found''')
| 261
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 216
|
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def __UpperCamelCase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict ):
__a : List[str] = os.path.abspath(lowerCAmelCase__ )
logger.info(f"Converting TensorFlow checkpoint from {tf_path}" )
# Load weights from TF model
__a : Tuple = tf.train.list_variables(lowerCAmelCase__ )
__a : Optional[Any] = []
__a : Union[str, Any] = []
__a : str = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
__a : Any = full_name.split('''/''' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f"Skipping non-model layer {full_name}" )
continue
if "optimizer" in full_name:
logger.info(f"Skipping optimization layer {full_name}" )
continue
if name[0] == "model":
# ignore initial 'model'
__a : Any = name[1:]
# figure out how many levels deep the name is
__a : List[Any] = 0
for _name in name:
if _name.startswith('''layer_with_weights''' ):
depth += 1
else:
break
layer_depth.append(lowerCAmelCase__ )
# read data
__a : Tuple = tf.train.load_variable(lowerCAmelCase__ , lowerCAmelCase__ )
names.append('''/'''.join(lowerCAmelCase__ ) )
arrays.append(lowerCAmelCase__ )
logger.info(f"Read a total of {len(lowerCAmelCase__ ):,} layers" )
# Sanity check
if len(set(lowerCAmelCase__ ) ) != 1:
raise ValueError(f"Found layer names with different depths (layer depth {list(set(lowerCAmelCase__ ) )})" )
__a : int = list(set(lowerCAmelCase__ ) )[0]
if layer_depth != 1:
raise ValueError(
'''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
''' heads.''' )
# convert layers
logger.info('''Converting weights...''' )
for full_name, array in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
__a : int = full_name.split('''/''' )
__a : Tuple = model
__a : Dict = []
for i, m_name in enumerate(lowerCAmelCase__ ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('''layer_with_weights''' ):
__a : Union[str, Any] = int(m_name.split('''-''' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['''embeddings''', '''LayerNorm'''] )
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''embeddings''' )
__a : List[str] = getattr(lowerCAmelCase__ , '''LayerNorm''' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
__a : Dict = getattr(lowerCAmelCase__ , '''encoder''' )
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''layer''' )
__a : Any = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['''pooler''', '''dense'''] )
__a : Any = getattr(lowerCAmelCase__ , '''pooler''' )
__a : Optional[int] = getattr(lowerCAmelCase__ , '''dense''' )
elif m_name == "embeddings":
trace.append('''embeddings''' )
__a : int = getattr(lowerCAmelCase__ , '''embeddings''' )
if layer_num == 0:
trace.append('''word_embeddings''' )
__a : Optional[int] = getattr(lowerCAmelCase__ , '''word_embeddings''' )
elif layer_num == 1:
trace.append('''position_embeddings''' )
__a : List[str] = getattr(lowerCAmelCase__ , '''position_embeddings''' )
elif layer_num == 2:
trace.append('''token_type_embeddings''' )
__a : Optional[Any] = getattr(lowerCAmelCase__ , '''token_type_embeddings''' )
else:
raise ValueError(f"Unknown embedding layer with name {full_name}" )
trace.append('''weight''' )
__a : Tuple = getattr(lowerCAmelCase__ , '''weight''' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['''attention''', '''self'''] )
__a : Optional[Any] = getattr(lowerCAmelCase__ , '''attention''' )
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''self''' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
__a : int = getattr(lowerCAmelCase__ , '''attention''' )
__a : List[Any] = getattr(lowerCAmelCase__ , '''output''' )
__a : List[Any] = getattr(lowerCAmelCase__ , '''LayerNorm''' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['''attention''', '''output''', '''dense'''] )
__a : Optional[int] = getattr(lowerCAmelCase__ , '''attention''' )
__a : Optional[Any] = getattr(lowerCAmelCase__ , '''output''' )
__a : Any = getattr(lowerCAmelCase__ , '''dense''' )
elif m_name == "_output_dense":
# output dense
trace.extend(['''output''', '''dense'''] )
__a : Tuple = getattr(lowerCAmelCase__ , '''output''' )
__a : str = getattr(lowerCAmelCase__ , '''dense''' )
elif m_name == "_output_layer_norm":
                # output layer norm
trace.extend(['''output''', '''LayerNorm'''] )
__a : int = getattr(lowerCAmelCase__ , '''output''' )
__a : str = getattr(lowerCAmelCase__ , '''LayerNorm''' )
elif m_name == "_key_dense":
# attention key
trace.append('''key''' )
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''key''' )
elif m_name == "_query_dense":
# attention query
trace.append('''query''' )
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''query''' )
elif m_name == "_value_dense":
# attention value
trace.append('''value''' )
__a : Union[str, Any] = getattr(lowerCAmelCase__ , '''value''' )
elif m_name == "_intermediate_dense":
                # intermediate (feed-forward) dense
trace.extend(['''intermediate''', '''dense'''] )
__a : Optional[Any] = getattr(lowerCAmelCase__ , '''intermediate''' )
__a : Optional[int] = getattr(lowerCAmelCase__ , '''dense''' )
elif m_name == "_output_layer_norm":
                # output layer norm (unreachable: the same condition is already handled above)
trace.append('''output''' )
__a : int = getattr(lowerCAmelCase__ , '''output''' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('''bias''' )
__a : Dict = getattr(lowerCAmelCase__ , '''bias''' )
elif m_name in ["kernel", "gamma"]:
trace.append('''weight''' )
__a : List[Any] = getattr(lowerCAmelCase__ , '''weight''' )
else:
logger.warning(f"Ignored {m_name}" )
# for certain layers reshape is necessary
__a : List[str] = '''.'''.join(lowerCAmelCase__ )
if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , lowerCAmelCase__ ) or re.match(
R'''(\S+)\.attention\.output\.dense\.weight''' , lowerCAmelCase__ ):
__a : str = array.reshape(pointer.data.shape )
if "kernel" in full_name:
__a : Optional[Any] = array.transpose()
if pointer.shape == array.shape:
__a : str = torch.from_numpy(lowerCAmelCase__ )
else:
raise ValueError(
f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
f" {array.shape}" )
logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}" )
return model
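# Side note (a sketch, not part of the original script): TF2 dense kernels are
# stored as (in_features, out_features), while torch.nn.Linear keeps its weight
# as (out_features, in_features), hence the `array.transpose()` applied to
# "kernel" tensors above. The helper name below is hypothetical and only
# demonstrates that conversion rule.
def _tf_kernel_to_torch_weight(kernel):
    import numpy as np
    import torch
    # e.g. a TF kernel of shape (768, 3072) becomes a torch weight of shape (3072, 768)
    return torch.from_numpy(np.ascontiguousarray(kernel.transpose()))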
def __UpperCamelCase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] ):
# Instantiate model
logger.info(f"Loading model based on config from {config_path}..." )
__a : Dict = BertConfig.from_json_file(lowerCAmelCase__ )
__a : int = BertModel(lowerCAmelCase__ )
# Load weights from checkpoint
logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}..." )
load_tfa_weights_in_bert(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
logger.info(f"Saving PyTorch model to {pytorch_dump_path}..." )
torch.save(model.state_dict() , lowerCAmelCase__ )
if __name__ == "__main__":
lowercase__ =argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model (must include filename).',
)
lowercase__ =parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
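# Example invocation of this script (all paths below are hypothetical):
#   python convert_bert_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_tf2/bert_model.ckpt \
#       --bert_config_file ./bert_tf2/bert_config.json \
#       --pytorch_dump_path ./bert_pytorch/pytorch_model.bin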
| 216
| 1
|
def UpperCAmelCase__ ( lowerCamelCase = 100 ):
lowercase :Union[str, Any] = 0
lowercase :List[Any] = 0
    for i in range(1, lowerCamelCase + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
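# Closed-form cross-check (a sketch, not part of the original solution): with
# sum(1..n) = n*(n+1)/2 and sum(i^2, 1..n) = n*(n+1)*(2n+1)/6, the same
# difference can be computed without a loop. The function name is hypothetical.
def closed_form_solution(n = 100):
    return (n * (n + 1) // 2) ** 2 - n * (n + 1) * (2 * n + 1) // 6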
if __name__ == "__main__":
    print(f'''{UpperCAmelCase__() = }''')
| 158
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
_a = StableDiffusionPanoramaPipeline
_a = TEXT_TO_IMAGE_PARAMS
_a = TEXT_TO_IMAGE_BATCH_PARAMS
_a = TEXT_TO_IMAGE_IMAGE_PARAMS
_a = TEXT_TO_IMAGE_IMAGE_PARAMS
def SCREAMING_SNAKE_CASE ( self: int ):
torch.manual_seed(0 )
lowercase :List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
lowercase :Any = DDIMScheduler()
torch.manual_seed(0 )
lowercase :Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase :Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
lowercase :Any = CLIPTextModel(_lowerCAmelCase )
lowercase :str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowercase :Union[str, Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def SCREAMING_SNAKE_CASE ( self: int , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Dict=0 ):
lowercase :Any = torch.manual_seed(_lowerCAmelCase )
lowercase :Any = {
"prompt": "a photo of the dolomites",
"generator": generator,
# Setting height and width to None to prevent OOMs on CPU.
"height": None,
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE ( self: List[Any] ):
lowercase :List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase :int = self.get_dummy_components()
lowercase :int = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase :Tuple = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase :List[str] = self.get_dummy_inputs(_lowerCAmelCase )
lowercase :List[Any] = sd_pipe(**_lowerCAmelCase ).images
lowercase :List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase :List[str] = np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def SCREAMING_SNAKE_CASE ( self: int ):
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
lowercase :Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase :List[str] = self.get_dummy_components()
lowercase :Optional[Any] = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase :Any = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase :Optional[int] = self.get_dummy_inputs(_lowerCAmelCase )
lowercase :List[Any] = "french fries"
lowercase :int = sd_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase )
lowercase :int = output.images
lowercase :List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase :Optional[Any] = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self: List[str] ):
lowercase :Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase :int = self.get_dummy_components()
lowercase :List[str] = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase :int = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase :Dict = self.get_dummy_inputs(_lowerCAmelCase )
lowercase :Any = sd_pipe(**_lowerCAmelCase , view_batch_size=2 )
lowercase :Union[str, Any] = output.images
lowercase :List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase :Optional[int] = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self: str ):
lowercase :int = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase :List[Any] = self.get_dummy_components()
lowercase :Tuple = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" )
lowercase :Tuple = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase :Any = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase :Optional[Any] = self.get_dummy_inputs(_lowerCAmelCase )
lowercase :List[Any] = sd_pipe(**_lowerCAmelCase ).images
lowercase :Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase :Optional[Any] = np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
lowercase :Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase :List[Any] = self.get_dummy_components()
lowercase :int = PNDMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , skip_prk_steps=_lowerCAmelCase )
lowercase :List[str] = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase :int = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase :Union[str, Any] = self.get_dummy_inputs(_lowerCAmelCase )
lowercase :Any = sd_pipe(**_lowerCAmelCase ).images
lowercase :Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase :Tuple = np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self: str ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self: Any , _lowerCAmelCase: Union[str, Any]=0 ):
lowercase :Any = torch.manual_seed(_lowerCAmelCase )
lowercase :Optional[int] = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE ( self: Tuple ):
lowercase :Dict = "stabilityai/stable-diffusion-2-base"
lowercase :Optional[Any] = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="scheduler" )
lowercase :List[str] = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase :Any = self.get_inputs()
lowercase :Optional[int] = pipe(**_lowerCAmelCase ).images
lowercase :int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
lowercase :Tuple = np.array(
[
0.36_96_83_92,
0.27_02_53_72,
0.32_44_67_66,
0.28_37_93_87,
0.36_36_32_74,
0.30_73_33_47,
0.27_10_00_27,
0.27_05_41_25,
0.25_53_60_96,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
lowercase :Union[str, Any] = StableDiffusionPanoramaPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base" , safety_checker=_lowerCAmelCase )
lowercase :int = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase :int = self.get_inputs()
lowercase :Optional[int] = pipe(**_lowerCAmelCase ).images
lowercase :List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
lowercase :Union[str, Any] = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def SCREAMING_SNAKE_CASE ( self: List[str] ):
lowercase :Dict = 0
def callback_fn(_lowerCAmelCase: int , _lowerCAmelCase: int , _lowerCAmelCase: torch.FloatTensor ) -> None:
lowercase :Any = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase :Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
lowercase :Optional[int] = latents[0, -3:, -3:, -1]
lowercase :Any = np.array(
[
0.18_68_18_69,
0.33_90_78_16,
0.5_36_12_76,
0.14_43_28_65,
-0.02_85_66_11,
-0.73_94_11_23,
0.23_39_79_87,
0.47_32_26_82,
-0.37_82_31_64,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowercase :str = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
lowercase :Optional[int] = latents[0, -3:, -3:, -1]
lowercase :Optional[Any] = np.array(
[
0.18_53_96_45,
0.33_98_72_48,
0.5_37_85_59,
0.14_43_71_42,
-0.02_45_52_61,
-0.7_33_83_17,
0.23_99_07_55,
0.47_35_62_72,
-0.3_78_65_05,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowercase :int = False
lowercase :Tuple = "stabilityai/stable-diffusion-2-base"
lowercase :Optional[Any] = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="scheduler" )
lowercase :Optional[int] = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
lowercase :Optional[int] = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase :int = self.get_inputs()
pipe(**_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def SCREAMING_SNAKE_CASE ( self: str ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase :Optional[Any] = "stabilityai/stable-diffusion-2-base"
lowercase :Dict = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="scheduler" )
lowercase :Optional[int] = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
lowercase :Union[str, Any] = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase :Optional[int] = self.get_inputs()
lowercase :Union[str, Any] = pipe(**_lowerCAmelCase )
lowercase :List[Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
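    # Note on the memory assertion above (explanatory comment, not test code):
    # enable_sequential_cpu_offload() keeps each submodule on the CPU and moves
    # it to the GPU only for its own forward pass, and enable_attention_slicing(1)
    # computes attention one slice at a time, which is why the peak allocation
    # stays under the asserted budget.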
| 158
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__A = logging.get_logger(__name__)
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = ["input_values", "padding_mask"]
def __init__( self , _UpperCAmelCase = 1 , _UpperCAmelCase = 24000 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
super().__init__(feature_size=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , padding_value=_UpperCAmelCase , **_UpperCAmelCase )
lowercase__: str = chunk_length_s
lowercase__: str = overlap
@property
def _snake_case ( self ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _snake_case ( self ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
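    # Worked example (illustrative values, not class defaults): with
    # sampling_rate=24000, chunk_length_s=1.0 and overlap=0.25, chunk_length is
    # int(1.0 * 24000) = 24000 samples and chunk_stride is
    # max(1, int(0.75 * 24000)) = 18000, so consecutive chunks overlap by
    # 6000 samples.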
def __call__( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
F""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if padding and truncation:
raise ValueError('''Both padding and truncation were set. Make sure you only set one.''' )
elif padding is None:
# by default let's pad the inputs
lowercase__: Tuple = True
lowercase__: Any = bool(
isinstance(_UpperCAmelCase , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
lowercase__: int = [np.asarray(_UpperCAmelCase , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(_UpperCAmelCase , np.ndarray ):
lowercase__: Optional[Any] = np.asarray(_UpperCAmelCase , dtype=np.floataa )
elif isinstance(_UpperCAmelCase , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
lowercase__: Union[str, Any] = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__: Optional[Any] = [np.asarray(_UpperCAmelCase ).T]
# verify inputs are valid
for idx, example in enumerate(_UpperCAmelCase ):
if example.ndim > 2:
raise ValueError(F"""Expected input shape (channels, length) but got shape {example.shape}""" )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F"""Expected mono audio but example has {example.shape[-1]} channels""" )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F"""Expected stereo audio but example has {example.shape[-1]} channels""" )
lowercase__: Any = None
lowercase__: Any = BatchFeature({'''input_values''': raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
lowercase__: Any = min(array.shape[0] for array in raw_audio )
lowercase__: Dict = int(np.floor(max_length / self.chunk_stride ) )
lowercase__: Union[str, Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
lowercase__: int = max(array.shape[0] for array in raw_audio )
lowercase__: str = int(np.ceil(max_length / self.chunk_stride ) )
lowercase__: Any = (nb_step - 1) * self.chunk_stride + self.chunk_length
lowercase__: List[Any] = '''max_length'''
else:
lowercase__: List[str] = input_values
# normal padding on batch
if padded_inputs is None:
lowercase__: Tuple = self.pad(
_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , padding=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , )
if padding:
lowercase__: Tuple = padded_inputs.pop('''attention_mask''' )
lowercase__: List[Any] = []
for example in padded_inputs.pop('''input_values''' ):
if self.feature_size == 1:
lowercase__: Any = example[..., None]
input_values.append(example.T )
lowercase__: Optional[Any] = input_values
if return_tensors is not None:
lowercase__: Optional[Any] = padded_inputs.convert_to_tensors(_UpperCAmelCase )
return padded_inputs
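    # Minimal usage sketch (assuming this is the 24 kHz Encodec feature
    # extractor; the random waveform and all variable names are illustrative):
    #   import numpy as np
    #   extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
    #   waveform = np.random.randn(24000).astype(np.float32)
    #   batch = extractor(waveform, sampling_rate=24000, return_tensors="pt")
    #   batch["input_values"].shape  # (batch, channels, num_samples)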
| 177
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
__A = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
__A = "UperNetConfig"
class UpperCAmelCase (nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 0 , _UpperCAmelCase = False , _UpperCAmelCase = 1 , ):
super().__init__()
lowercase__: List[Any] = nn.Convad(
in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , kernel_size=_UpperCAmelCase , padding=_UpperCAmelCase , bias=_UpperCAmelCase , dilation=_UpperCAmelCase , )
lowercase__: List[Any] = nn.BatchNormad(_UpperCAmelCase )
lowercase__: int = nn.ReLU()
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: Dict = self.conv(_UpperCAmelCase )
lowercase__: Optional[int] = self.batch_norm(_UpperCAmelCase )
lowercase__: List[Any] = self.activation(_UpperCAmelCase )
return output
class UpperCAmelCase (nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
super().__init__()
lowercase__: int = [
nn.AdaptiveAvgPoolad(_UpperCAmelCase ),
UperNetConvModule(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(_UpperCAmelCase ) , _UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: Any = input
for layer in self.layers:
lowercase__: Any = layer(_UpperCAmelCase )
return hidden_state
class UpperCAmelCase (nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
super().__init__()
lowercase__: int = pool_scales
lowercase__: Optional[Any] = align_corners
lowercase__: Optional[int] = in_channels
lowercase__: Optional[Any] = channels
lowercase__: List[Any] = []
for i, pool_scale in enumerate(_UpperCAmelCase ):
lowercase__: Optional[int] = UperNetPyramidPoolingBlock(pool_scale=_UpperCAmelCase , in_channels=_UpperCAmelCase , channels=_UpperCAmelCase )
self.blocks.append(_UpperCAmelCase )
self.add_module(str(_UpperCAmelCase ) , _UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: Union[str, Any] = []
for ppm in self.blocks:
lowercase__: Tuple = ppm(_UpperCAmelCase )
lowercase__: Any = nn.functional.interpolate(
_UpperCAmelCase , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners )
ppm_outs.append(_UpperCAmelCase )
return ppm_outs
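    # Shape sketch (illustrative numbers): for an input of shape (1, C, 32, 32)
    # and pool_scales (1, 2, 3, 6), each block pools to (1, channels, s, s) and
    # is bilinearly resized back to (1, channels, 32, 32), so psp_forward can
    # concatenate all pyramid levels with the input along the channel axis.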
class UpperCAmelCase (nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
super().__init__()
lowercase__: Optional[int] = config
lowercase__: int = config.pool_scales # e.g. (1, 2, 3, 6)
lowercase__: Optional[int] = in_channels
lowercase__: List[str] = config.hidden_size
lowercase__: List[str] = False
lowercase__: List[str] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
lowercase__: Dict = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
lowercase__: int = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
lowercase__: List[Any] = nn.ModuleList()
lowercase__: Union[str, Any] = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
lowercase__: int = UperNetConvModule(_UpperCAmelCase , self.channels , kernel_size=1 )
lowercase__: Dict = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(_UpperCAmelCase )
self.fpn_convs.append(_UpperCAmelCase )
lowercase__: Any = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def _snake_case ( self ):
self.apply(self._init_weights )
def _snake_case ( self , _UpperCAmelCase ):
if isinstance(_UpperCAmelCase , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: List[str] = inputs[-1]
lowercase__: str = [x]
psp_outs.extend(self.psp_modules(_UpperCAmelCase ) )
lowercase__: Dict = torch.cat(_UpperCAmelCase , dim=1 )
lowercase__: Tuple = self.bottleneck(_UpperCAmelCase )
return output
def _snake_case ( self , _UpperCAmelCase ):
# build laterals
lowercase__: Dict = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(_UpperCAmelCase ) )
# build top-down path
lowercase__: int = len(_UpperCAmelCase )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
lowercase__: str = laterals[i - 1].shape[2:]
lowercase__: Optional[int] = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=_UpperCAmelCase , mode='''bilinear''' , align_corners=self.align_corners )
# build outputs
lowercase__: str = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
lowercase__: Any = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners )
lowercase__: int = torch.cat(_UpperCAmelCase , dim=1 )
lowercase__: Tuple = self.fpn_bottleneck(_UpperCAmelCase )
lowercase__: Dict = self.classifier(_UpperCAmelCase )
return output
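    # Top-down sketch (illustrative sizes): with lateral maps of spatial sizes
    # 32, 16 and 8, the loop above upsamples the 8x8 map onto the 16x16 one,
    # then that sum onto the 32x32 one; every FPN output is finally resized to
    # 32x32 before the channel-wise concatenation and fpn_bottleneck.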
class UpperCAmelCase (nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase = 2 , _UpperCAmelCase = 3 , _UpperCAmelCase = 1 ):
super().__init__()
lowercase__: Optional[Any] = config
lowercase__: Optional[Any] = config.auxiliary_in_channels
lowercase__: List[Any] = config.auxiliary_channels
lowercase__: Tuple = config.auxiliary_num_convs
lowercase__: Any = config.auxiliary_concat_input
lowercase__: Optional[int] = in_index
lowercase__: Tuple = (kernel_size // 2) * dilation
lowercase__: Tuple = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=_UpperCAmelCase , padding=_UpperCAmelCase , dilation=_UpperCAmelCase ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=_UpperCAmelCase , padding=_UpperCAmelCase , dilation=_UpperCAmelCase ) )
if self.num_convs == 0:
lowercase__: List[Any] = nn.Identity()
else:
lowercase__: Union[str, Any] = nn.Sequential(*_UpperCAmelCase )
if self.concat_input:
lowercase__: Dict = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=_UpperCAmelCase , padding=kernel_size // 2 )
lowercase__: Union[str, Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def _snake_case ( self ):
self.apply(self._init_weights )
def _snake_case ( self , _UpperCAmelCase ):
if isinstance(_UpperCAmelCase , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def _snake_case ( self , _UpperCAmelCase ):
# just take the relevant feature maps
lowercase__: Dict = encoder_hidden_states[self.in_index]
lowercase__: Optional[int] = self.convs(_UpperCAmelCase )
if self.concat_input:
lowercase__: Optional[int] = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
lowercase__: Dict = self.classifier(_UpperCAmelCase )
return output
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = UperNetConfig
_UpperCAmelCase :int = "pixel_values"
_UpperCAmelCase :Optional[Any] = True
def _snake_case ( self , _UpperCAmelCase ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def _snake_case ( self ):
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=False ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = value
__A = R"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__A = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." ,_UpperCAmelCase ,)
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
lowercase__: Optional[int] = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
lowercase__: Any = UperNetHead(_UpperCAmelCase , in_channels=self.backbone.channels )
lowercase__: Tuple = UperNetFCNHead(_UpperCAmelCase ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
@replace_return_docstrings(output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC )
def _snake_case ( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
lowercase__: Tuple = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__: Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__: str = output_attentions if output_attentions is not None else self.config.output_attentions
lowercase__: List[str] = self.backbone.forward_with_filtered_kwargs(
_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , output_attentions=_UpperCAmelCase )
lowercase__: Tuple = outputs.feature_maps
lowercase__: Union[str, Any] = self.decode_head(_UpperCAmelCase )
lowercase__: str = nn.functional.interpolate(_UpperCAmelCase , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=_UpperCAmelCase )
lowercase__: Any = None
if self.auxiliary_head is not None:
lowercase__: Union[str, Any] = self.auxiliary_head(_UpperCAmelCase )
lowercase__: Tuple = nn.functional.interpolate(
_UpperCAmelCase , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=_UpperCAmelCase )
lowercase__: List[Any] = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('''The number of labels should be greater than one''' )
else:
# compute weighted loss
lowercase__: List[str] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
lowercase__: Optional[Any] = loss_fct(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = loss_fct(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: int = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
lowercase__: Tuple = (logits,) + outputs[1:]
else:
lowercase__: Optional[int] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=_UpperCAmelCase , logits=_UpperCAmelCase , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 177
| 1
|
import math
import os
import sys
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> str:
SCREAMING_SNAKE_CASE_ = ''
try:
with open(__UpperCAmelCase , 'rb' ) as binary_file:
SCREAMING_SNAKE_CASE_ = binary_file.read()
for dat in data:
SCREAMING_SNAKE_CASE_ = f"{dat:08b}"
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
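# Example of the byte-to-bits expansion above (illustration only): the single
# byte 0x41 ("A") formats as f"{0x41:08b}" == "01000001", so the returned
# string is one fixed-width 8-bit group per input byte.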
def UpperCAmelCase_ ( __UpperCAmelCase : dict[str, str] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : str ) -> None:
lexicon.pop(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = last_match_id
if math.loga(__UpperCAmelCase ).is_integer():
for curr_key in lexicon:
SCREAMING_SNAKE_CASE_ = '0' + lexicon[curr_key]
SCREAMING_SNAKE_CASE_ = bin(__UpperCAmelCase )[2:]
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> str:
SCREAMING_SNAKE_CASE_ = {'0': '0', '1': '1'}
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = '', ''
SCREAMING_SNAKE_CASE_ = len(__UpperCAmelCase )
for i in range(len(__UpperCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
SCREAMING_SNAKE_CASE_ = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
index += 1
SCREAMING_SNAKE_CASE_ = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
SCREAMING_SNAKE_CASE_ = lexicon[curr_string]
result += last_match_id
return result
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : str ) -> str:
SCREAMING_SNAKE_CASE_ = os.path.getsize(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = bin(__UpperCAmelCase )[2:]
SCREAMING_SNAKE_CASE_ = len(__UpperCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : str ) -> None:
SCREAMING_SNAKE_CASE_ = 8
try:
with open(__UpperCAmelCase , 'wb' ) as opened_file:
SCREAMING_SNAKE_CASE_ = [
to_write[i : i + byte_length]
for i in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(__UpperCAmelCase , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : str ) -> None:
SCREAMING_SNAKE_CASE_ = read_file_binary(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = compress_data(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = add_file_length(__UpperCAmelCase , __UpperCAmelCase )
write_file_binary(__UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
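# Example invocation (file names are hypothetical):
#   python lzw_compress.py uncompressed.bin compressed.lzw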
| 210
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
lowerCamelCase__ : Dict = TypeVar('T')
def UpperCAmelCase_ ( __UpperCAmelCase : int ) -> int:
return (position - 1) // 2
def UpperCAmelCase_ ( __UpperCAmelCase : int ) -> int:
return (2 * position) + 1
def UpperCAmelCase_ ( __UpperCAmelCase : int ) -> int:
return (2 * position) + 2
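# Worked example for the 0-indexed heap layout above: the parent of position 5
# is (5 - 1) // 2 = 2, and the children of position 1 sit at 2*1 + 1 = 3 and
# 2*1 + 2 = 4.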
class lowerCamelCase_ ( Generic[T] ):
'''simple docstring'''
def __init__( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = 0
def __len__( self : Optional[Any] ):
return self.elements
def __repr__( self : Optional[int] ):
return str(self.heap )
def lowerCAmelCase_ ( self : Union[str, Any] ):
# Check if the priority queue is empty
return self.elements == 0
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : T , _lowerCAmelCase : int ):
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
SCREAMING_SNAKE_CASE_ = self.elements
self.elements += 1
self._bubble_up(_lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.heap[0]
self._bubble_down(_lowerCAmelCase )
return elem
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : T , _lowerCAmelCase : int ):
# Update the weight of the given key
SCREAMING_SNAKE_CASE_ = self.position_map[elem]
SCREAMING_SNAKE_CASE_ = (elem, weight)
if position > 0:
SCREAMING_SNAKE_CASE_ = get_parent_position(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(_lowerCAmelCase )
else:
self._bubble_down(_lowerCAmelCase )
else:
self._bubble_down(_lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : T ):
# Place a node at the proper position (upward movement) [to be used internally
# only]
SCREAMING_SNAKE_CASE_ = self.position_map[elem]
if curr_pos == 0:
return None
SCREAMING_SNAKE_CASE_ = get_parent_position(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.heap[curr_pos]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(_lowerCAmelCase , _lowerCAmelCase )
return self._bubble_up(_lowerCAmelCase )
return None
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : T ):
# Place a node at the proper position (downward movement) [to be used
# internally only]
SCREAMING_SNAKE_CASE_ = self.position_map[elem]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.heap[curr_pos]
SCREAMING_SNAKE_CASE_ = get_child_left_position(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = get_child_right_position(_lowerCAmelCase )
if child_left_position < self.elements and child_right_position < self.elements:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.heap[child_left_position]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(_lowerCAmelCase , _lowerCAmelCase )
return self._bubble_down(_lowerCAmelCase )
if child_left_position < self.elements:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(_lowerCAmelCase , _lowerCAmelCase )
return self._bubble_down(_lowerCAmelCase )
else:
return None
if child_right_position < self.elements:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(_lowerCAmelCase , _lowerCAmelCase )
return self._bubble_down(_lowerCAmelCase )
return None
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : int ):
# Swap the nodes at the given positions
SCREAMING_SNAKE_CASE_ = self.heap[nodea_pos][0]
SCREAMING_SNAKE_CASE_ = self.heap[nodea_pos][0]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
SCREAMING_SNAKE_CASE_ = nodea_pos
SCREAMING_SNAKE_CASE_ = nodea_pos
class lowerCamelCase_ ( Generic[T] ):
'''simple docstring'''
def __init__( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = 0
def __repr__( self : Optional[int] ):
return str(self.connections )
def __len__( self : Tuple ):
return self.nodes
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : T ):
# Add a node in the graph if it is not in the graph
if node not in self.connections:
SCREAMING_SNAKE_CASE_ = {}
self.nodes += 1
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : T , _lowerCAmelCase : T , _lowerCAmelCase : int ):
# Add an edge between 2 nodes in the graph
self.add_node(_lowerCAmelCase )
self.add_node(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = weight
SCREAMING_SNAKE_CASE_ = weight
def UpperCAmelCase_ ( __UpperCAmelCase : GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
SCREAMING_SNAKE_CASE_ = {node: maxsize for node in graph.connections}
SCREAMING_SNAKE_CASE_ = {node: None for node in graph.connections}
SCREAMING_SNAKE_CASE_ = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(__UpperCAmelCase , __UpperCAmelCase )
if priority_queue.is_empty():
return dist, parent
# initialization
SCREAMING_SNAKE_CASE_ = priority_queue.extract_min()
SCREAMING_SNAKE_CASE_ = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
SCREAMING_SNAKE_CASE_ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__UpperCAmelCase , dist[neighbour] )
SCREAMING_SNAKE_CASE_ = node
    # running Prim's algorithm
while not priority_queue.is_empty():
SCREAMING_SNAKE_CASE_ = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
SCREAMING_SNAKE_CASE_ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__UpperCAmelCase , dist[neighbour] )
SCREAMING_SNAKE_CASE_ = node
return dist, parent
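# Usage sketch (comments only; `GraphUndirectedWeighted` follows the type hint
# above, and `prims` is a hypothetical name for the function just defined):
#   graph = GraphUndirectedWeighted()
#   graph.add_edge("a", "b", 3)
#   graph.add_edge("b", "c", 10)
#   graph.add_edge("a", "c", 15)
#   dist, parent = prims(graph)
#   # `parent` encodes the minimum spanning tree, e.g. parent["b"] == "a"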
| 210
| 1
|
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
_snake_case : Optional[Any] = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
_snake_case : List[str] = {
"allenai/led-base-16384": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowerCAmelCase_ ( ):
__snake_case : Optional[Any] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
__snake_case : Optional[int] = bs[:]
__snake_case : int = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__lowerCamelCase )
cs.append(2**8 + n )
n += 1
__snake_case : int = [chr(__lowerCamelCase ) for n in cs]
return dict(zip(__lowerCamelCase , __lowerCamelCase ) )
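# Illustration of the mapping built above: bytes that already print nicely map
# to themselves (e.g. byte 97 -> "a"), while the remaining bytes are shifted to
# code points 256 and up (e.g. byte 0 -> chr(256)), so every byte becomes a
# visible unicode character for the BPE vocabulary.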
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : int = set()
__snake_case : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__snake_case : int = char
return pairs
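# Example for the pair-collection helper above: given the word
# ("h", "e", "l", "l", "o") it returns {("h", "e"), ("e", "l"), ("l", "l"),
# ("l", "o")} - the adjacent symbol pairs that the BPE loop below ranks
# against self.bpe_ranks.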
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = VOCAB_FILES_NAMES
__UpperCAmelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Dict = ["input_ids", "attention_mask"]
def __init__( self : Optional[Any] , lowerCamelCase : Tuple , lowerCamelCase : Any , lowerCamelCase : Optional[int]="replace" , lowerCamelCase : Tuple="<s>" , lowerCamelCase : Optional[Any]="</s>" , lowerCamelCase : Any="</s>" , lowerCamelCase : Tuple="<s>" , lowerCamelCase : List[str]="<unk>" , lowerCamelCase : Dict="<pad>" , lowerCamelCase : List[str]="<mask>" , lowerCamelCase : List[str]=False , **lowerCamelCase : str , ) -> Optional[Any]:
__snake_case : Union[str, Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else bos_token
__snake_case : Dict = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else eos_token
__snake_case : int = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else sep_token
__snake_case : Any = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else cls_token
__snake_case : int = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else unk_token
__snake_case : Union[str, Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
__snake_case : Union[str, Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
super().__init__(
errors=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , )
with open(lowerCamelCase , encoding="utf-8" ) as vocab_handle:
__snake_case : Tuple = json.load(lowerCamelCase )
__snake_case : Optional[int] = {v: k for k, v in self.encoder.items()}
__snake_case : List[Any] = errors # how to handle errors in decoding
__snake_case : List[str] = bytes_to_unicode()
__snake_case : str = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase , encoding="utf-8" ) as merges_handle:
__snake_case : List[str] = merges_handle.read().split("\n" )[1:-1]
__snake_case : int = [tuple(merge.split() ) for merge in bpe_merges]
__snake_case : Union[str, Any] = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
__snake_case : Optional[int] = {}
__snake_case : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__snake_case : str = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def __snake_case ( self : Any ) -> Tuple:
return len(self.encoder )
def __snake_case ( self : Optional[int] ) -> Union[str, Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def __snake_case ( self : str , lowerCamelCase : int ) -> Union[str, Any]:
if token in self.cache:
return self.cache[token]
__snake_case : List[Any] = tuple(lowerCamelCase )
__snake_case : List[Any] = get_pairs(lowerCamelCase )
if not pairs:
return token
while True:
__snake_case : Any = min(lowerCamelCase , key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__snake_case , __snake_case : int = bigram
__snake_case : Tuple = []
__snake_case : int = 0
while i < len(lowerCamelCase ):
try:
__snake_case : Dict = word.index(lowerCamelCase , lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__snake_case : List[str] = j
if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__snake_case : Union[str, Any] = tuple(lowerCamelCase )
__snake_case : List[Any] = new_word
if len(lowerCamelCase ) == 1:
break
else:
__snake_case : Dict = get_pairs(lowerCamelCase )
__snake_case : Optional[Any] = " ".join(lowerCamelCase )
__snake_case : Optional[Any] = word
return word
def __snake_case ( self : Tuple , lowerCamelCase : List[Any] ) -> List[Any]:
__snake_case : Optional[int] = []
for token in re.findall(self.pat , lowerCamelCase ):
__snake_case : Union[str, Any] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase ).split(" " ) )
return bpe_tokens
def __snake_case ( self : Optional[Any] , lowerCamelCase : Optional[int] ) -> Optional[int]:
return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) )
def __snake_case ( self : Union[str, Any] , lowerCamelCase : str ) -> Union[str, Any]:
return self.decoder.get(lowerCamelCase )
def __snake_case ( self : Any , lowerCamelCase : List[str] ) -> Optional[Any]:
__snake_case : List[Any] = "".join(lowerCamelCase )
__snake_case : Any = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def __snake_case ( self : Tuple , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__snake_case : str = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__snake_case : Optional[Any] = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase , ensure_ascii=lowerCamelCase ) + "\n" )
__snake_case : Any = 0
with open(lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
" Please check that the tokenizer is not corrupted!" )
__snake_case : List[str] = token_index
writer.write(" ".join(lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def __snake_case ( self : List[Any] , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__snake_case : Dict = [self.cls_token_id]
__snake_case : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __snake_case ( self : Tuple , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def __snake_case ( self : Optional[Any] , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
__snake_case : Union[str, Any] = [self.sep_token_id]
__snake_case : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __snake_case ( self : str , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any]=False , **lowerCamelCase : Tuple ) -> int:
__snake_case : List[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase ) > 0 and not text[0].isspace()):
__snake_case : List[Any] = " " + text
return (text, kwargs)
def __snake_case ( self : Optional[Any] , lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase : Optional[int] = None , lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , ) -> dict:
__snake_case : Optional[int] = super()._pad(
encoded_inputs=lowerCamelCase , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
__snake_case : int = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__snake_case : Optional[Any] = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
__snake_case : Any = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase )
if needs_to_be_padded:
__snake_case : str = len(lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__snake_case : Tuple = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
__snake_case : List[Any] = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
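        # Padding sketch (illustrative): if input_ids grow from length 4 to 6
        # under right-padding, a global_attention_mask of [1, 0, 0, 1] becomes
        # [1, 0, 0, 1, -1, -1]; -1 marks padded positions, 0 local attention,
        # and 1 global attention.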
| 123
|
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case : int = logging.get_logger(__name__)
_snake_case : Union[str, Any] = "▁"
_snake_case : Any = {"vocab_file": "prophetnet.tokenizer"}
_snake_case : Tuple = {
"vocab_file": {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
),
}
}
_snake_case : Any = {
"microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}
_snake_case : Any = {
"microsoft/xprophetnet-large-wiki100-cased": 512,
}
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : Union[str, Any] = collections.OrderedDict()
with open(__lowerCamelCase , "r" , encoding="utf-8" ) as reader:
__snake_case : Dict = reader.readlines()
for index, token in enumerate(__lowerCamelCase ):
__snake_case : Optional[int] = token.rstrip("\n" )
__snake_case : Union[str, Any] = index
return vocab
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : List[str] = VOCAB_FILES_NAMES
__UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Any = ["input_ids", "attention_mask"]
def __init__( self : Any , lowerCamelCase : str , lowerCamelCase : Optional[Any]="[SEP]" , lowerCamelCase : List[str]="[SEP]" , lowerCamelCase : Dict="[SEP]" , lowerCamelCase : Optional[int]="[UNK]" , lowerCamelCase : Dict="[PAD]" , lowerCamelCase : str="[CLS]" , lowerCamelCase : str="[MASK]" , lowerCamelCase : Optional[Dict[str, Any]] = None , **lowerCamelCase : int , ) -> None:
__snake_case : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase , eos_token=lowerCamelCase , sep_token=lowerCamelCase , unk_token=lowerCamelCase , pad_token=lowerCamelCase , cls_token=lowerCamelCase , mask_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
__snake_case : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase ) )
__snake_case : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
__snake_case : List[str] = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
__snake_case : List[str] = F'[unused{i}]'
__snake_case : int = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
__snake_case : Any = 12
__snake_case : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(lowerCamelCase )
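        # Offset sketch from the alignment table above: with fairseq_offset = 12,
        # spm id 3 (the token ",") maps to embedding position 3 + 12 = 15, while
        # positions 0-4 hold the special tokens and 5-14 the ten [unused] slots.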
def __getstate__( self : Union[str, Any] ) -> Union[str, Any]:
__snake_case : Union[str, Any] = self.__dict__.copy()
__snake_case : int = None
return state
def __setstate__( self : Tuple , lowerCamelCase : List[str] ) -> Tuple:
__snake_case : Tuple = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__snake_case : Tuple = {}
__snake_case : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __snake_case ( self : Dict , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return ([0] * len(lowerCamelCase )) + [1]
return ([0] * len(lowerCamelCase )) + [1] + ([0] * len(lowerCamelCase )) + [1]
def __snake_case ( self : Any , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
__snake_case : str = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __snake_case ( self : str ) -> str:
return len(self.sp_model ) + self.fairseq_offset
def __snake_case ( self : Any ) -> int:
__snake_case : Dict = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __snake_case ( self : str , lowerCamelCase : str ) -> str:
        return self.sp_model.encode(lowerCamelCase , out_type=str )
def __snake_case ( self : List[str] , lowerCamelCase : Dict ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__snake_case : List[Any] = self.sp_model.PieceToId(lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __snake_case ( self : Dict , lowerCamelCase : Optional[int] ) -> Optional[Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __snake_case ( self : int , lowerCamelCase : Union[str, Any] ) -> Optional[Any]:
__snake_case : str = "".join(lowerCamelCase ).replace(lowerCamelCase , " " ).strip()
return out_string
def __snake_case ( self : List[str] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__snake_case : List[str] = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase , "wb" ) as fi:
__snake_case : Any = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
return (out_vocab_file,)
def __snake_case ( self : Union[str, Any] , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
__snake_case : List[str] = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
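# A minimal sketch of the fairseq/SentencePiece offset mapping the class above
# implements (hypothetical standalone helper, not the class's real API):
# special tokens occupy ids 0-4, the ten [unusedN] tokens ids 5-14, so every
# real SentencePiece piece is shifted by the offset of 12 -- e.g. spm id 3
# (",") becomes embedding id 15, matching the alignment comment in __init__.
FAIRSEQ_TOKENS_TO_IDS = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
FAIRSEQ_OFFSET = 12

def token_to_id(sp_model, token: str, unk_token_id: int = 3) -> int:
    if token in FAIRSEQ_TOKENS_TO_IDS:
        return FAIRSEQ_TOKENS_TO_IDS[token]
    spm_id = sp_model.PieceToId(token)
    # SentencePiece returns 0 for unknown pieces; map that to [UNK].
    return spm_id + FAIRSEQ_OFFSET if spm_id else unk_token_id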
import inspect
import unittest
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCamelCase__ ( self ):
"""simple docstring"""
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCamelCase__ ( self ):
"""simple docstring"""
import diffusers
from diffusers.dependency_versions_table import deps
lowerCAmelCase__ = inspect.getmembers(_UpperCamelCase , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
lowerCAmelCase__ = 'k-diffusion'
elif backend == "invisible_watermark":
lowerCAmelCase__ = 'invisible-watermark'
assert backend in deps, F"{backend} is not in the deps table!"
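# A minimal sketch of the backend-name normalization the test above applies
# (assumed mapping, mirroring the two special cases in the test): dummy-object
# backends use module-style underscores, while the pinned dependency table is
# keyed by pip package names with hyphens.
BACKEND_TO_PIP_NAME = {"k_diffusion": "k-diffusion", "invisible_watermark": "invisible-watermark"}

def backend_to_pip_name(backend: str) -> str:
    return BACKEND_TO_PIP_NAME.get(backend, backend)

assert backend_to_pip_name("k_diffusion") == "k-diffusion"
assert backend_to_pip_name("torch") == "torch"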
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( __lowercase , unittest.TestCase):
_SCREAMING_SNAKE_CASE : int = KandinskyVaaInpaintPipeline
_SCREAMING_SNAKE_CASE : int = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
_SCREAMING_SNAKE_CASE : Optional[Any] = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
_SCREAMING_SNAKE_CASE : List[Any] = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_SCREAMING_SNAKE_CASE : Optional[Any] = False
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 32
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 32
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 1_00
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase__ = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCAmelCase__ = UNetaDConditionModel(**_UpperCamelCase )
return model
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase__ = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.dummy_unet
lowerCAmelCase__ = self.dummy_movq
lowerCAmelCase__ = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=_UpperCamelCase , set_alpha_to_one=_UpperCamelCase , steps_offset=1 , prediction_type='epsilon' , thresholding=_UpperCamelCase , )
lowerCAmelCase__ = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase=0 ):
"""simple docstring"""
lowerCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
lowerCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_UpperCamelCase )
# create init_image
lowerCAmelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
lowerCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCAmelCase__ = Image.fromarray(np.uint8(_UpperCamelCase ) ).convert('RGB' ).resize((2_56, 2_56) )
# create mask
        lowerCAmelCase__ = np.ones((64, 64) , dtype=np.float32 )
lowerCAmelCase__ = 0
if str(_UpperCamelCase ).startswith('mps' ):
lowerCAmelCase__ = torch.manual_seed(_UpperCamelCase )
else:
lowerCAmelCase__ = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
lowerCAmelCase__ = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = 'cpu'
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = self.pipeline_class(**_UpperCamelCase )
lowerCAmelCase__ = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
lowerCAmelCase__ = pipe(**self.get_dummy_inputs(_UpperCamelCase ) )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = pipe(
**self.get_dummy_inputs(_UpperCamelCase ) , return_dict=_UpperCamelCase , )[0]
lowerCAmelCase__ = image[0, -3:, -3:, -1]
lowerCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
print(F"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.50_77_59_03, 0.49_52_71_95, 0.48_82_45_43, 0.50_19_22_37, 0.48_64_49_06, 0.49_37_38_14, 0.4_78_05_98, 0.47_23_48_27, 0.48_32_78_48] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCamelCase__ ( self ):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
lowerCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        lowerCAmelCase__ = np.ones((7_68, 7_68) , dtype=np.float32 )
lowerCAmelCase__ = 0
lowerCAmelCase__ = 'a hat'
lowerCAmelCase__ = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.float16 )
pipe_prior.to(_UpperCamelCase )
lowerCAmelCase__ = KandinskyVaaInpaintPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.float16 )
lowerCAmelCase__ = pipeline.to(_UpperCamelCase )
pipeline.set_progress_bar_config(disable=_UpperCamelCase )
lowerCAmelCase__ = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ , lowerCAmelCase__ = pipe_prior(
_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
lowerCAmelCase__ = pipeline(
image=_UpperCamelCase , mask_image=_UpperCamelCase , image_embeds=_UpperCamelCase , negative_image_embeds=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='np' , )
lowerCAmelCase__ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase )
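# A minimal sketch of the device-aware seeding pattern used in get_dummy_inputs
# above: MPS does not support per-device torch.Generator objects, so the
# global CPU generator is seeded there instead (torch.manual_seed returns the
# seeded default generator).
import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)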