code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : str , **_lowerCamelCase : Tuple):
lowercase__ : Tuple = AutoConfig.from_pretrained(_lowerCamelCase , **_lowerCamelCase)
lowercase__ : Tuple = AutoModelForSeqaSeqLM.from_config(_lowerCamelCase)
model.save_pretrained(_lowerCamelCase)
AutoTokenizer.from_pretrained(_lowerCamelCase).save_pretrained(_lowerCamelCase)
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 87 |
from maths.prime_factors import prime_factors
def __lowerCamelCase ( UpperCAmelCase_ : int ):
"""simple docstring"""
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
a :Dict = F'''Input value of [number={number}] must be an integer'''
raise TypeError(UpperCAmelCase_ )
if number < 1:
raise ValueError('''Input must be a positive integer''' )
return -1 if len(prime_factors(UpperCAmelCase_ ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 94 | 0 |
from __future__ import annotations
from typing import Generic, TypeVar
__lowerCamelCase = TypeVar("""T""")
class UpperCAmelCase ( Generic[T] ):
def __init__(self : str , snake_case__ : Dict ) -> Optional[Any]:
'''simple docstring'''
snake_case : str = data
snake_case : Optional[int] = self
snake_case : Union[str, Any] = 0
class UpperCAmelCase ( Generic[T] ):
def __init__(self : Optional[Any] ) -> int:
'''simple docstring'''
snake_case : Dict = {}
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Tuple ) -> Tuple:
'''simple docstring'''
snake_case : Union[str, Any] = DisjointSetTreeNode(__SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Any = self.map[data]
if elem_ref != elem_ref.parent:
snake_case : List[str] = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Dict , snake_case__ : Optional[Any] ) -> List[Any]:
'''simple docstring'''
if nodea.rank > nodea.rank:
snake_case : Dict = nodea
else:
snake_case : Tuple = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : Dict , snake_case__ : str ) -> List[str]:
'''simple docstring'''
self.link(self.find_set(__SCREAMING_SNAKE_CASE ) , self.find_set(__SCREAMING_SNAKE_CASE ) )
class UpperCAmelCase ( Generic[T] ):
def __init__(self : str ) -> Any:
'''simple docstring'''
snake_case : int = {}
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Any ) -> Tuple:
'''simple docstring'''
if node not in self.connections:
snake_case : Any = {}
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : Dict , snake_case__ : Any , snake_case__ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
self.add_node(__SCREAMING_SNAKE_CASE )
self.add_node(__SCREAMING_SNAKE_CASE )
snake_case : Dict = weight
snake_case : Tuple = weight
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case : Any = []
snake_case : Union[str, Any] = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda snake_case__ : x[2] )
# creating the disjoint set
snake_case : Optional[int] = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(__SCREAMING_SNAKE_CASE )
# MST generation
snake_case : List[Any] = 0
snake_case : Optional[int] = 0
snake_case : Tuple = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
snake_case , snake_case , snake_case : Any = edges[index]
index += 1
snake_case : Any = disjoint_set.find_set(__SCREAMING_SNAKE_CASE )
snake_case : int = disjoint_set.find_set(__SCREAMING_SNAKE_CASE )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
disjoint_set.union(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return graph
| 351 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__lowerCamelCase = logging.get_logger(__name__)
class UpperCAmelCase ( A_ ):
A__ : int = ["pixel_values"]
def __init__(self : Tuple , snake_case__ : bool = True , snake_case__ : Union[int, float] = 1 / 2_55 , snake_case__ : bool = True , snake_case__ : int = 8 , **snake_case__ : Dict , ) -> None:
'''simple docstring'''
super().__init__(**snake_case__ )
snake_case : int = do_rescale
snake_case : List[str] = rescale_factor
snake_case : Optional[Any] = do_pad
snake_case : Dict = pad_size
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : np.ndarray , snake_case__ : float , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : List[str] ) -> np.ndarray:
'''simple docstring'''
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : np.ndarray , snake_case__ : int , snake_case__ : Optional[Union[str, ChannelDimension]] = None ) -> Dict:
'''simple docstring'''
snake_case , snake_case : Union[str, Any] = get_image_size(snake_case__ )
snake_case : str = (old_height // size + 1) * size - old_height
snake_case : List[str] = (old_width // size + 1) * size - old_width
return pad(snake_case__ , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : ImageInput , snake_case__ : Optional[bool] = None , snake_case__ : Optional[float] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[int] = None , snake_case__ : Optional[Union[str, TensorType]] = None , snake_case__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **snake_case__ : List[Any] , ) -> Tuple:
'''simple docstring'''
snake_case : str = do_rescale if do_rescale is not None else self.do_rescale
snake_case : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : Optional[Any] = do_pad if do_pad is not None else self.do_pad
snake_case : Dict = pad_size if pad_size is not None else self.pad_size
snake_case : Union[str, Any] = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
snake_case : str = [to_numpy_array(snake_case__ ) for image in images]
if do_rescale:
snake_case : str = [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
if do_pad:
snake_case : List[Any] = [self.pad(snake_case__ , size=snake_case__ ) for image in images]
snake_case : Union[str, Any] = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
snake_case : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
| 10 | 0 |
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class _UpperCAmelCase ( lowercase__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = ComputeEnvironment.AMAZON_SAGEMAKER
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'ml.p3.2xlarge'
SCREAMING_SNAKE_CASE_ : List[Any] = 'accelerate_sagemaker_execution_role'
SCREAMING_SNAKE_CASE_ : Any = 'hf-sm'
SCREAMING_SNAKE_CASE_ : Optional[int] = 'us-east-1'
SCREAMING_SNAKE_CASE_ : str = 1
SCREAMING_SNAKE_CASE_ : Dict = 'accelerate-sagemaker-1'
SCREAMING_SNAKE_CASE_ : str = '1.6'
SCREAMING_SNAKE_CASE_ : Dict = '4.4'
SCREAMING_SNAKE_CASE_ : List[str] = 'train.py'
SCREAMING_SNAKE_CASE_ : Optional[int] = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
class _UpperCAmelCase ( unittest.TestCase ):
def A ( self : Tuple ) -> Tuple:
# If no defaults are changed, `to_kwargs` returns an empty dict.
lowercase_ : Any = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args['''model_name_or_path'''] , __lowercase )
assert isinstance(converted_args['''do_train'''] , __lowercase )
assert isinstance(converted_args['''epochs'''] , __lowercase )
assert isinstance(converted_args['''learning_rate'''] , __lowercase )
assert isinstance(converted_args['''max_steps'''] , __lowercase )
with pytest.raises(__lowercase ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 33 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a : Union[str, Any] = logging.get_logger(__name__)
class a ( lowercase__ ):
"""simple docstring"""
a : Any = ['pixel_values']
def __init__( self : Optional[int] , __lowercase : bool = True , __lowercase : Dict[str, int] = None , __lowercase : int = 0.9 , __lowercase : PILImageResampling = PILImageResampling.BICUBIC , __lowercase : bool = True , __lowercase : Dict[str, int] = None , __lowercase : Union[int, float] = 1 / 255 , __lowercase : bool = True , __lowercase : bool = True , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , **__lowercase : Any , ) -> None:
super().__init__(**__lowercase )
__UpperCAmelCase : Tuple = size if size is not None else {"""shortest_edge""": 224}
__UpperCAmelCase : Union[str, Any] = get_size_dict(__lowercase , default_to_square=__lowercase )
__UpperCAmelCase : Union[str, Any] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__UpperCAmelCase : Any = get_size_dict(__lowercase , param_name="""crop_size""" )
__UpperCAmelCase : Dict = do_resize
__UpperCAmelCase : Dict = size
__UpperCAmelCase : Tuple = crop_pct
__UpperCAmelCase : List[Any] = resample
__UpperCAmelCase : List[Any] = do_center_crop
__UpperCAmelCase : List[Any] = crop_size
__UpperCAmelCase : Any = do_rescale
__UpperCAmelCase : Tuple = rescale_factor
__UpperCAmelCase : int = do_normalize
__UpperCAmelCase : List[Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__UpperCAmelCase : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase ( self : Tuple , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : Optional[float] = None , __lowercase : PILImageResampling = PILImageResampling.BICUBIC , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Optional[int] , ) -> np.ndarray:
__UpperCAmelCase : Tuple = get_size_dict(__lowercase , default_to_square=__lowercase )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f"""size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
if crop_pct is not None:
if "shortest_edge" in size:
__UpperCAmelCase : Union[str, Any] = int(size["""shortest_edge"""] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
__UpperCAmelCase : Tuple = int(size["""height"""] / crop_pct )
else:
__UpperCAmelCase : str = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
else:
raise ValueError("""Invalid size for resize: {}""".format(__lowercase ) )
__UpperCAmelCase : str = get_resize_output_image_size(__lowercase , size=__lowercase , default_to_square=__lowercase )
else:
if "shortest_edge" in size:
__UpperCAmelCase : List[str] = get_resize_output_image_size(__lowercase , size=size["""shortest_edge"""] , default_to_square=__lowercase )
elif "height" in size and "width" in size:
__UpperCAmelCase : int = (size["""height"""], size["""width"""])
else:
raise ValueError("""Invalid size for resize: {}""".format(__lowercase ) )
return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCAmelCase ( self : Dict , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Union[str, Any] , ) -> np.ndarray:
__UpperCAmelCase : Optional[Any] = get_size_dict(__lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"""size must contain 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(__lowercase , size=(size["""height"""], size["""width"""]) , data_format=__lowercase , **__lowercase )
def UpperCAmelCase ( self : List[str] , __lowercase : np.ndarray , __lowercase : Union[int, float] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : int , ) -> int:
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCAmelCase ( self : List[Any] , __lowercase : np.ndarray , __lowercase : Union[float, List[float]] , __lowercase : Union[float, List[float]] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : List[Any] , ) -> np.ndarray:
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCAmelCase ( self : Any , __lowercase : ImageInput , __lowercase : bool = None , __lowercase : Dict[str, int] = None , __lowercase : int = None , __lowercase : PILImageResampling = None , __lowercase : bool = None , __lowercase : Dict[str, int] = None , __lowercase : bool = None , __lowercase : float = None , __lowercase : bool = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : ChannelDimension = ChannelDimension.FIRST , **__lowercase : List[str] , ) -> PIL.Image.Image:
__UpperCAmelCase : Any = do_resize if do_resize is not None else self.do_resize
__UpperCAmelCase : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
__UpperCAmelCase : Optional[Any] = resample if resample is not None else self.resample
__UpperCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCAmelCase : Dict = do_rescale if do_rescale is not None else self.do_rescale
__UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCAmelCase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase : Tuple = image_mean if image_mean is not None else self.image_mean
__UpperCAmelCase : Any = image_std if image_std is not None else self.image_std
__UpperCAmelCase : Optional[int] = size if size is not None else self.size
__UpperCAmelCase : Dict = get_size_dict(__lowercase , default_to_square=__lowercase )
__UpperCAmelCase : Tuple = crop_size if crop_size is not None else self.crop_size
__UpperCAmelCase : Tuple = get_size_dict(__lowercase , param_name="""crop_size""" )
__UpperCAmelCase : Dict = make_list_of_images(__lowercase )
if not valid_images(__lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_pct is None:
raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
__UpperCAmelCase : str = [to_numpy_array(__lowercase ) for image in images]
if do_resize:
__UpperCAmelCase : str = [self.resize(image=__lowercase , size=__lowercase , crop_pct=__lowercase , resample=__lowercase ) for image in images]
if do_center_crop:
__UpperCAmelCase : Any = [self.center_crop(image=__lowercase , size=__lowercase ) for image in images]
if do_rescale:
__UpperCAmelCase : List[str] = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
if do_normalize:
__UpperCAmelCase : str = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
__UpperCAmelCase : List[str] = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
__UpperCAmelCase : Any = {"""pixel_values""": images}
return BatchFeature(data=__lowercase , tensor_type=__lowercase )
| 114 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Tuple = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE : Optional[int] = {
"""vocab_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE : Dict = {
"""camembert-base""": 512,
}
SCREAMING_SNAKE_CASE : Tuple = """▁"""
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =['input_ids', 'attention_mask']
lowerCamelCase__ =CamembertTokenizer
def __init__(self , a_=None , a_=None , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=["<s>NOTUSED", "</s>NOTUSED"] , **a_ , ):
'''simple docstring'''
__snake_case : Any = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
super().__init__(
a_ , tokenizer_file=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , additional_special_tokens=a_ , **a_ , )
__snake_case : List[str] = vocab_file
__snake_case : List[str] = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__snake_case : Optional[Any] = [self.cls_token_id]
__snake_case : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
__snake_case : Optional[int] = [self.sep_token_id]
__snake_case : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(a_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__snake_case : Any = os.path.join(
a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
copyfile(self.vocab_file , a_ )
return (out_vocab_file,)
| 360 |
"""simple docstring"""
def lowercase ( _snake_case : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
__snake_case : Tuple = len(_snake_case )
__snake_case : str = sum(_snake_case )
__snake_case : Dict = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
__snake_case : Optional[Any] = True
for i in range(1 , s + 1 ):
__snake_case : int = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
__snake_case : Union[str, Any] = dp[i][j - 1]
if arr[i - 1] <= j:
__snake_case : Tuple = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
__snake_case : List[str] = s - 2 * j
break
return diff
| 24 | 0 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
__A = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
__A = typing.Union[np.floataa, int, float] # noqa: UP007
def lowerCAmelCase_ ( __a , __a ) -> VectorOut:
"""simple docstring"""
return np.sqrt(np.sum((np.asarray(__a ) - np.asarray(__a )) ** 2 ) )
def lowerCAmelCase_ ( __a , __a ) -> VectorOut:
"""simple docstring"""
return sum((va - va) ** 2 for va, va in zip(__a , __a ) ) ** (1 / 2)
if __name__ == "__main__":
def lowerCAmelCase_ ( ) -> None:
"""simple docstring"""
from timeit import timeit
print("Without Numpy" )
print(
timeit(
"euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=10000 , globals=globals() , ) )
print("With Numpy" )
print(
timeit(
"euclidean_distance([1, 2, 3], [4, 5, 6])" , number=10000 , globals=globals() , ) )
benchmark()
| 10 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : int = '''mvp'''
UpperCamelCase : Union[str, Any] = ['''past_key_values''']
UpperCamelCase : Any = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[str] , UpperCAmelCase__ : List[str]=50267 , UpperCAmelCase__ : Optional[Any]=1024 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : Optional[Any]=4096 , UpperCAmelCase__ : int=16 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : int=4096 , UpperCAmelCase__ : List[Any]=16 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Union[str, Any]=1024 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : Tuple=0.0_2 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Dict=100 , UpperCAmelCase__ : Union[str, Any]=800 , **UpperCAmelCase__ : Dict , ) -> List[Any]:
_a : Any = vocab_size
_a : Any = max_position_embeddings
_a : Union[str, Any] = d_model
_a : List[str] = encoder_ffn_dim
_a : List[Any] = encoder_layers
_a : Dict = encoder_attention_heads
_a : Tuple = decoder_ffn_dim
_a : List[Any] = decoder_layers
_a : Optional[Any] = decoder_attention_heads
_a : Optional[Any] = dropout
_a : str = attention_dropout
_a : Dict = activation_dropout
_a : Any = activation_function
_a : Tuple = init_std
_a : Dict = encoder_layerdrop
_a : Optional[int] = decoder_layerdrop
_a : Optional[Any] = classifier_dropout
_a : List[Any] = use_cache
_a : Dict = encoder_layers
_a : str = scale_embedding # scale factor will be sqrt(d_model) if True
_a : int = use_prompt
_a : Dict = prompt_length
_a : Dict = prompt_mid_dim
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , forced_eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , UpperCAmelCase__ ):
_a : List[str] = self.bos_token_id
warnings.warn(
f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
"""The config can simply be saved and uploaded again to be fixed.""" )
| 294 | 0 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
A = generate_pascal_triangle(lowercase__ )
for row_idx in range(lowercase__ ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=" " )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=" " )
else:
print(triangle[row_idx][col_idx] , end="" )
print()
def __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
if not isinstance(lowercase__ , lowercase__ ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
A = []
for current_row_idx in range(lowercase__ ):
A = populate_current_row(lowercase__ , lowercase__ )
triangle.append(lowercase__ )
return triangle
def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ ):
"""simple docstring"""
A = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
A , A = 1, 1
for current_col_idx in range(1 , lowercase__ ):
calculate_current_element(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
return current_row
def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
"""simple docstring"""
A = triangle[current_row_idx - 1][current_col_idx - 1]
A = triangle[current_row_idx - 1][current_col_idx]
A = above_to_left_elt + above_to_right_elt
def __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
if not isinstance(lowercase__ , lowercase__ ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
A = [[1]]
for row_index in range(1 , lowercase__ ):
A = [0] + result[-1] + [0]
A = row_index + 1
# Calculate the number of distinct elements in a row
A = sum(divmod(lowercase__ , 2 ) )
A = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
A = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
A = row_first_half + row_second_half
result.append(lowercase__ )
return result
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowercase__ , lowercase__ ) -> None:
A = F"""{func.__name__}({value})"""
A = timeit(F"""__main__.{call}""" , setup="import __main__" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F"""{call:38} -- {timing:.4f} seconds""" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(lowercase__ , lowercase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 364 |
"""simple docstring"""
from __future__ import annotations
class __UpperCamelCase :
def __init__(self : Tuple , __SCREAMING_SNAKE_CASE : int = 0):
A = key
def SCREAMING_SNAKE_CASE__ (self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
A = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_5_5
return [chr(ord(__SCREAMING_SNAKE_CASE) ^ key) for ch in content]
def SCREAMING_SNAKE_CASE__ (self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
A = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_5_5
return [chr(ord(__SCREAMING_SNAKE_CASE) ^ key) for ch in content]
def SCREAMING_SNAKE_CASE__ (self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 0):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
A = key or self.__key or 1
# make sure key can be any size
while key > 2_5_5:
key -= 2_5_5
# This will be returned
A = ""
for ch in content:
ans += chr(ord(__SCREAMING_SNAKE_CASE) ^ key)
return ans
def SCREAMING_SNAKE_CASE__ (self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 0):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
A = key or self.__key or 1
# make sure key can be any size
while key > 2_5_5:
key -= 2_5_5
# This will be returned
A = ""
for ch in content:
ans += chr(ord(__SCREAMING_SNAKE_CASE) ^ key)
return ans
def SCREAMING_SNAKE_CASE__ (self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 0):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
try:
with open(__SCREAMING_SNAKE_CASE) as fin, open("encrypt.out" , "w+") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
except OSError:
return False
return True
def SCREAMING_SNAKE_CASE__ (self : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int):
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
try:
with open(__SCREAMING_SNAKE_CASE) as fin, open("decrypt.out" , "w+") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 57 | 0 |
"""simple docstring"""
import os
import sys
SCREAMING_SNAKE_CASE__ = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
SCREAMING_SNAKE_CASE__ = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
return AutoConfig.from_pretrained(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@add_start_docstrings(AutoTokenizer.__doc__ )
def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@add_start_docstrings(AutoModel.__doc__ )
def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
return AutoModel.from_pretrained(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def UpperCAmelCase__ ( *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
| 46 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
debug_launcher(test_script.main )
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
debug_launcher(test_ops.main )
| 16 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase_ ( lowerCamelCase_ , unittest.TestCase):
"""simple docstring"""
snake_case__ : Dict = KandinskyVaaControlnetImgaImgPipeline
snake_case__ : Any = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
snake_case__ : Dict = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
snake_case__ : Union[str, Any] = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
snake_case__ : str = False
@property
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
return 3_2
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
return 3_2
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
return self.time_input_dim
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self : List[str] ) -> Any:
return 1_0_0
@property
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = {
"in_channels": 8,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
__SCREAMING_SNAKE_CASE = UNetaDConditionModel(**__snake_case )
return model
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = self.dummy_unet
__SCREAMING_SNAKE_CASE = self.dummy_movq
__SCREAMING_SNAKE_CASE = {
"num_train_timesteps": 1_0_0_0,
"beta_schedule": "linear",
"beta_start": 0.00_085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
__SCREAMING_SNAKE_CASE = DDIMScheduler(**__snake_case )
__SCREAMING_SNAKE_CASE = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str=0 ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__snake_case ) ).to(__snake_case )
__SCREAMING_SNAKE_CASE = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__snake_case )
# create init_image
__SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__snake_case ) ).to(__snake_case )
__SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(__snake_case ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
# create hint
__SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__snake_case ) ).to(__snake_case )
if str(__snake_case ).startswith("mps" ):
__SCREAMING_SNAKE_CASE = torch.manual_seed(__snake_case )
else:
__SCREAMING_SNAKE_CASE = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__SCREAMING_SNAKE_CASE = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 6_4,
"width": 6_4,
"num_inference_steps": 1_0,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = "cpu"
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = self.pipeline_class(**__snake_case )
__SCREAMING_SNAKE_CASE = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__SCREAMING_SNAKE_CASE = pipe(**self.get_dummy_inputs(__snake_case ) )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : int ) -> List[Any]:
__SCREAMING_SNAKE_CASE = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" )
__SCREAMING_SNAKE_CASE = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
__SCREAMING_SNAKE_CASE = init_image.resize((5_1_2, 5_1_2) )
__SCREAMING_SNAKE_CASE = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png" )
__SCREAMING_SNAKE_CASE = torch.from_numpy(np.array(__snake_case ) ).float() / 2_5_5.0
__SCREAMING_SNAKE_CASE = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
__SCREAMING_SNAKE_CASE = "A robot, 4k photo"
__SCREAMING_SNAKE_CASE = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
__SCREAMING_SNAKE_CASE = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa )
__SCREAMING_SNAKE_CASE = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
__SCREAMING_SNAKE_CASE = torch.Generator(device="cpu" ).manual_seed(0 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = pipe_prior(
__snake_case , image=__snake_case , strength=0.85 , generator=__snake_case , negative_prompt="" , ).to_tuple()
__SCREAMING_SNAKE_CASE = pipeline(
image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , hint=__snake_case , generator=__snake_case , num_inference_steps=1_0_0 , height=5_1_2 , width=5_1_2 , strength=0.5 , output_type="np" , )
__SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
| 371 |
"""simple docstring"""
from datetime import datetime
import requests
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
__SCREAMING_SNAKE_CASE = requests.get(base_url + url ).json()[0]["urls"][0]["src"]
return requests.get(lowerCAmelCase_ ).content
if __name__ == "__main__":
a__ : str = input('''Enter Video/IGTV url: ''').strip()
a__ : List[Any] = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(F"Done. Video saved to disk as {file_name}.")
| 195 | 0 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCAmelCase : Optional[Any] = {
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
# See all BART models at https://huggingface.co/models?filter=bart
}
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = "bart"
lowerCAmelCase__ = ["past_key_values"]
lowerCAmelCase__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : List[Any] , A : Optional[int]=50_265 , A : List[Any]=1_024 , A : Tuple=12 , A : Optional[Any]=4_096 , A : str=16 , A : Any=12 , A : int=4_096 , A : List[str]=16 , A : Any=0.0 , A : Any=0.0 , A : Dict="gelu" , A : str=1_024 , A : Optional[int]=0.1 , A : List[str]=0.0 , A : List[str]=0.0 , A : Tuple=0.02 , A : Optional[Any]=0.0 , A : Any=False , A : List[Any]=True , A : Optional[Any]=3 , A : List[Any]=1 , A : List[str]=0 , A : Dict=2 , A : List[str]=True , A : List[str]=2 , A : Union[str, Any]=2 , **A : int , ):
__snake_case: Optional[Any] = vocab_size
__snake_case: Union[str, Any] = max_position_embeddings
__snake_case: List[str] = d_model
__snake_case: int = encoder_ffn_dim
__snake_case: str = encoder_layers
__snake_case: Dict = encoder_attention_heads
__snake_case: Union[str, Any] = decoder_ffn_dim
__snake_case: str = decoder_layers
__snake_case: Any = decoder_attention_heads
__snake_case: Optional[Any] = dropout
__snake_case: int = attention_dropout
__snake_case: int = activation_dropout
__snake_case: Union[str, Any] = activation_function
__snake_case: str = init_std
__snake_case: int = encoder_layerdrop
__snake_case: Optional[Any] = decoder_layerdrop
__snake_case: Union[str, Any] = classifier_dropout
__snake_case: str = use_cache
__snake_case: Any = encoder_layers
__snake_case: List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=A , pad_token_id=A , bos_token_id=A , eos_token_id=A , is_encoder_decoder=A , decoder_start_token_id=A , forced_eos_token_id=A , **A , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , A ):
__snake_case: Optional[Any] = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
"""The config can simply be saved and uploaded again to be fixed.""" )
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
@property
def UpperCAmelCase__ ( self : int ):
if self.task in ["default", "seq2seq-lm"]:
__snake_case: Dict = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
__snake_case: Optional[Any] = {0: """batch"""}
__snake_case: str = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
__snake_case: Tuple = {0: """batch""", 1: """decoder_sequence"""}
__snake_case: Dict = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(A , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__snake_case: str = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
__snake_case , __snake_case: Any = self.num_layers
for i in range(A ):
__snake_case: List[str] = {0: """batch""", 2: """past_sequence + sequence"""}
__snake_case: List[str] = {0: """batch""", 2: """past_sequence + sequence"""}
else:
__snake_case: Dict = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def UpperCAmelCase__ ( self : Dict ):
if self.task in ["default", "seq2seq-lm"]:
__snake_case: Optional[Any] = super().outputs
else:
__snake_case: Union[str, Any] = super(A , self ).outputs
if self.use_past:
__snake_case , __snake_case: List[Any] = self.num_layers
for i in range(A ):
__snake_case: Dict = {0: """batch""", 2: """past_sequence + sequence"""}
__snake_case: int = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def UpperCAmelCase__ ( self : Optional[int] , A : PreTrainedTokenizer , A : int = -1 , A : int = -1 , A : bool = False , A : Optional[TensorType] = None , ):
__snake_case: Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A , A , A , A , A )
# Generate decoder inputs
__snake_case: List[Any] = seq_length if not self.use_past else 1
__snake_case: Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A , A , A , A , A )
__snake_case: Tuple = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
__snake_case: Any = dict(**A , **A )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__snake_case , __snake_case: Dict = common_inputs["""input_ids"""].shape
__snake_case: Dict = common_inputs["""decoder_input_ids"""].shape[1]
__snake_case , __snake_case: int = self.num_attention_heads
__snake_case: Dict = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__snake_case: int = decoder_seq_length + 3
__snake_case: Tuple = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__snake_case: Optional[Any] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(A , A )] , dim=1 )
__snake_case: Optional[Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__snake_case , __snake_case: str = self.num_layers
__snake_case: str = min(A , A )
__snake_case: Any = max(A , A ) - min_num_layers
__snake_case: List[Any] = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(A ):
common_inputs["past_key_values"].append(
(
torch.zeros(A ),
torch.zeros(A ),
torch.zeros(A ),
torch.zeros(A ),
) )
# TODO: test this.
__snake_case: Optional[Any] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(A , A ):
common_inputs["past_key_values"].append((torch.zeros(A ), torch.zeros(A )) )
return common_inputs
def UpperCAmelCase__ ( self : List[str] , A : PreTrainedTokenizer , A : int = -1 , A : int = -1 , A : bool = False , A : Optional[TensorType] = None , ):
__snake_case: Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A , A , A , A , A )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__snake_case , __snake_case: Tuple = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__snake_case: Union[str, Any] = seqlen + 2
__snake_case , __snake_case: Dict = self.num_layers
__snake_case , __snake_case: Dict = self.num_attention_heads
__snake_case: Optional[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__snake_case: Optional[Any] = common_inputs["""attention_mask"""].dtype
__snake_case: int = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(A , A , dtype=A )] , dim=1 )
__snake_case: List[str] = [
(torch.zeros(A ), torch.zeros(A )) for _ in range(A )
]
return common_inputs
def UpperCAmelCase__ ( self : List[str] , A : PreTrainedTokenizer , A : int = -1 , A : int = -1 , A : bool = False , A : Optional[TensorType] = None , ):
__snake_case: Optional[Any] = compute_effective_axis_dimension(
A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__snake_case: List[Any] = tokenizer.num_special_tokens_to_add(A )
__snake_case: int = compute_effective_axis_dimension(
A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A )
# Generate dummy inputs according to compute batch and sequence
__snake_case: Union[str, Any] = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
__snake_case: Union[str, Any] = dict(tokenizer(A , return_tensors=A ) )
return common_inputs
def UpperCAmelCase__ ( self : List[Any] , A : PreTrainedTokenizer , A : int = -1 , A : int = -1 , A : bool = False , A : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
__snake_case: List[str] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
A , batch_size=A , seq_length=A , is_pair=A , framework=A )
elif self.task == "causal-lm":
__snake_case: str = self._generate_dummy_inputs_for_causal_lm(
A , batch_size=A , seq_length=A , is_pair=A , framework=A )
else:
__snake_case: str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A , batch_size=A , seq_length=A , is_pair=A , framework=A )
return common_inputs
def UpperCAmelCase__ ( self : Tuple , A : Union[str, Any] , A : Tuple , A : int , A : int ):
if self.task in ["default", "seq2seq-lm"]:
__snake_case: Any = super()._flatten_past_key_values_(A , A , A , A )
else:
__snake_case: List[Any] = super(A , self )._flatten_past_key_values_(
A , A , A , A )
| 111 |
from collections.abc import Sequence
def a_ ( __lowercase : Sequence[float] , __lowercase : float ) -> float:
return sum(c * (x**i) for i, c in enumerate(__lowercase ) )
def a_ ( __lowercase : Sequence[float] , __lowercase : float ) -> float:
_snake_case = 0.0
for coeff in reversed(__lowercase ):
_snake_case = result * x + coeff
return result
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = (0.0, 0.0, 5.0, 9.3, 7.0)
_lowerCamelCase : Optional[int] = 1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x)) | 282 | 0 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class a__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a : Any = ProphetNetTokenizer
a : Any = False
def lowerCAmelCase_ ( self ) -> Dict:
'''simple docstring'''
super().setUp()
a = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def lowerCAmelCase_ ( self , A ) -> List[str]:
'''simple docstring'''
a = 'UNwant\u00E9d,running'
a = 'unwanted, running'
return input_text, output_text
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
a = self.tokenizer_class(self.vocab_file )
a = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(__a , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [9, 6, 7, 12, 10, 11] )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
a = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
a = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
a = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def lowerCAmelCase_ ( self ) -> Any:
'''simple docstring'''
a = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
a = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
a = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
a = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
a = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
a = BasicTokenizer(do_lower_case=__a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
a = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
a = {}
for i, token in enumerate(__a ):
a = i
a = WordpieceTokenizer(vocab=__a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
@require_torch
def lowerCAmelCase_ ( self ) -> Tuple:
'''simple docstring'''
a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
a = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
a = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
a = tokenizer(__a , padding=__a , return_tensors="pt" )
self.assertIsInstance(__a , __a )
a = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__a , __a )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def lowerCAmelCase_ ( self ) -> Any:
'''simple docstring'''
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
a = tokenizer.encode("sequence builders" , add_special_tokens=__a )
a = tokenizer.encode("multi-sequence build" , add_special_tokens=__a )
a = tokenizer.build_inputs_with_special_tokens(__a )
a = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
| 350 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : Dict = "▁"
lowercase__ : Union[str, Any] = {"vocab_file": "spiece.model"}
lowercase__ : Union[str, Any] = {
"vocab_file": {
"google/reformer-crime-and-punishment": (
"https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
)
}
}
lowercase__ : Tuple = {
"google/reformer-crime-and-punishment": 524_288,
}
class a__ ( UpperCamelCase__ ):
a : List[Any] = VOCAB_FILES_NAMES
a : List[Any] = PRETRAINED_VOCAB_FILES_MAP
a : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Dict = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A="</s>" , A="<unk>" , A=[] , A = None , **A , ) -> None:
'''simple docstring'''
a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=A , unk_token=A , additional_special_tokens=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
a = vocab_file
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.sp_model.get_piece_size()
def lowerCAmelCase_ ( self ) -> Dict[str, int]:
'''simple docstring'''
a = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[int]:
'''simple docstring'''
a = self.__dict__.copy()
a = None
return state
def __setstate__( self , A ) -> Union[str, Any]:
'''simple docstring'''
a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase_ ( self , A ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(A , out_type=A )
def lowerCAmelCase_ ( self , A ) -> Union[str, Any]:
'''simple docstring'''
return self.sp_model.piece_to_id(A )
def lowerCAmelCase_ ( self , A ) -> Optional[int]:
'''simple docstring'''
if index < self.sp_model.get_piece_size():
a = self.sp_model.IdToPiece(A )
return token
def lowerCAmelCase_ ( self , A ) -> Union[str, Any]:
'''simple docstring'''
a = []
a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A ) + token
a = []
else:
current_sub_tokens.append(A )
out_string += self.sp_model.decode(A )
return out_string.strip()
def lowerCAmelCase_ ( self , A , A = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(A ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
a = os.path.join(
A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , "wb" ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
| 180 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
lowercase__ : str = None
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : Dict = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase__ : List[str] = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
lowercase__ : List[Any] = {
'''albert-base-v1''': 5_12,
'''albert-large-v1''': 5_12,
'''albert-xlarge-v1''': 5_12,
'''albert-xxlarge-v1''': 5_12,
'''albert-base-v2''': 5_12,
'''albert-large-v2''': 5_12,
'''albert-xlarge-v2''': 5_12,
'''albert-xxlarge-v2''': 5_12,
}
lowercase__ : str = '''▁'''
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = AlbertTokenizer
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[MASK]" , **_UpperCAmelCase , ):
'''simple docstring'''
__A : List[Any] = (
AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase , normalized=_UpperCAmelCase)
if isinstance(_UpperCAmelCase , _UpperCAmelCase)
else mask_token
)
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , **_UpperCAmelCase , )
__A : Tuple = do_lower_case
__A : Any = remove_space
__A : Optional[Any] = keep_accents
__A : Any = vocab_file
__A : Dict = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
__A : List[Any] = [self.sep_token_id]
__A : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
__A : Dict = [self.sep_token_id]
__A : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(_UpperCAmelCase):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
__A : Union[str, Any] = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(_UpperCAmelCase):
copyfile(self.vocab_file , _UpperCAmelCase)
return (out_vocab_file,) | 190 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : Dict = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''roberta'''
def __init__( self , _UpperCAmelCase=5_0265 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , _UpperCAmelCase="absolute" , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase)
__A : Optional[int] = vocab_size
__A : int = hidden_size
__A : Union[str, Any] = num_hidden_layers
__A : List[str] = num_attention_heads
__A : Optional[int] = hidden_act
__A : str = intermediate_size
__A : Union[str, Any] = hidden_dropout_prob
__A : Dict = attention_probs_dropout_prob
__A : int = max_position_embeddings
__A : str = type_vocab_size
__A : Any = initializer_range
__A : int = layer_norm_eps
__A : Optional[int] = position_embedding_type
__A : int = use_cache
__A : Union[str, Any] = classifier_dropout
class SCREAMING_SNAKE_CASE (a__ ):
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
if self.task == "multiple-choice":
__A : str = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__A : List[str] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
]) | 190 | 1 |
"""simple docstring"""
from math import pi
def snake_case_ ( A_ : int, A_ : int ):
'''simple docstring'''
return 2 * pi * radius * (angle / 3_60)
if __name__ == "__main__":
print(arc_length(90, 10))
| 352 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(_lowercase)
class __snake_case ( _lowercase):
def __init__( self : Any , **__lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , '''vision''' )
self.check_model_type(__lowerCAmelCase )
def __call__( self : Dict , __lowerCAmelCase : Union[str, "Image.Image", List[Dict[str, Any]]] , __lowerCAmelCase : Union[str, List[str]] = None , **__lowerCAmelCase : int , ):
"""simple docstring"""
if "text_queries" in kwargs:
_lowerCamelCase : List[Any] = kwargs.pop('''text_queries''' )
if isinstance(__lowerCAmelCase , (str, Image.Image) ):
_lowerCamelCase : Optional[int] = {'''image''': image, '''candidate_labels''': candidate_labels}
else:
_lowerCamelCase : List[Any] = image
_lowerCamelCase : List[str] = super().__call__(__lowerCAmelCase , **__lowerCAmelCase )
return results
def SCREAMING_SNAKE_CASE ( self : List[Any] , **__lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : int = {}
if "threshold" in kwargs:
_lowerCamelCase : Optional[Any] = kwargs['''threshold''']
if "top_k" in kwargs:
_lowerCamelCase : int = kwargs['''top_k''']
return {}, {}, postprocess_params
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : int = load_image(inputs['''image'''] )
_lowerCamelCase : Optional[Any] = inputs['''candidate_labels''']
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_lowerCamelCase : int = candidate_labels.split(''',''' )
_lowerCamelCase : Tuple = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(__lowerCAmelCase ):
_lowerCamelCase : Any = self.tokenizer(__lowerCAmelCase , return_tensors=self.framework )
_lowerCamelCase : Optional[Any] = self.image_processor(__lowerCAmelCase , return_tensors=self.framework )
yield {
"is_last": i == len(__lowerCAmelCase ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = model_inputs.pop('''target_size''' )
_lowerCamelCase : List[Any] = model_inputs.pop('''candidate_label''' )
_lowerCamelCase : Dict = model_inputs.pop('''is_last''' )
_lowerCamelCase : str = self.model(**__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
return model_outputs
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Optional[Any]=None ):
"""simple docstring"""
_lowerCamelCase : str = []
for model_output in model_outputs:
_lowerCamelCase : Any = model_output['''candidate_label''']
_lowerCamelCase : Union[str, Any] = BaseModelOutput(__lowerCAmelCase )
_lowerCamelCase : Tuple = self.image_processor.post_process_object_detection(
outputs=__lowerCAmelCase , threshold=__lowerCAmelCase , target_sizes=model_output['''target_size'''] )[0]
for index in outputs["scores"].nonzero():
_lowerCamelCase : Tuple = outputs['''scores'''][index].item()
_lowerCamelCase : Optional[Any] = self._get_bounding_box(outputs['''boxes'''][index][0] )
_lowerCamelCase : Optional[Any] = {'''score''': score, '''label''': label, '''box''': box}
results.append(__lowerCAmelCase )
_lowerCamelCase : int = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : x["score"] , reverse=__lowerCAmelCase )
if top_k:
_lowerCamelCase : Dict = results[:top_k]
return results
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : "torch.Tensor" ):
"""simple docstring"""
if self.framework != "pt":
raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = box.int().tolist()
_lowerCamelCase : Union[str, Any] = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
| 175 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class __magic_name__ ( unittest.TestCase ):
@slow
def __lowercase ( self : Union[str, Any] ):
_a : List[Any] = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
_a : List[Any] = AutoTokenizer.from_pretrained('xlm-roberta-base' )
_a : Optional[Any] = 'The dog is cute and lives in the garden house'
_a : int = jnp.array([tokenizer.encode(_UpperCAmelCase )] )
_a : Optional[int] = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
_a : Dict = jnp.array(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
_a : Tuple = model(_UpperCAmelCase )['last_hidden_state']
self.assertEqual(output.shape ,_UpperCAmelCase )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] ,_UpperCAmelCase ,atol=1E-3 ) )
| 89 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 | 1 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : bool = True , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : int = 32 , __UpperCAmelCase : bool = True , __UpperCAmelCase : Union[int, float] = 1 / 255 , __UpperCAmelCase : bool = True , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073] , __UpperCAmelCase : Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711] , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[int]=7 , __UpperCAmelCase : List[Any]=30 , __UpperCAmelCase : List[str]=400 , __UpperCAmelCase : Union[str, Any]=3 , ) ->List[str]:
"""simple docstring"""
a = parent
a = do_resize
a = size if size is not None else {'''shortest_edge''': 288}
a = size_divisor
a = do_rescale
a = rescale_factor
a = do_normalize
a = do_center_crop
a = image_mean
a = image_std
a = do_pad
a = batch_size
a = num_channels
a = min_resolution
a = max_resolution
def __lowerCAmelCase ( self : List[Any] ) ->List[Any]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Tuple=False ) ->Dict:
"""simple docstring"""
if not batched:
a = self.size['''shortest_edge''']
a = image_inputs[0]
if isinstance(__UpperCAmelCase , Image.Image ):
a , a = image.size
else:
a , a = image.shape[1], image.shape[2]
a = size / min(__UpperCAmelCase , __UpperCAmelCase )
if h < w:
a , a = size, scale * w
else:
a , a = scale * h, size
a = int((1_333 / 800) * size )
if max(__UpperCAmelCase , __UpperCAmelCase ) > max_size:
a = max_size / max(__UpperCAmelCase , __UpperCAmelCase )
a = newh * scale
a = neww * scale
a , a = int(newh + 0.5 ), int(neww + 0.5 )
a , a = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
a = []
for image in image_inputs:
a , a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
a = max(__UpperCAmelCase , key=lambda __UpperCAmelCase : item[0] )[0]
a = max(__UpperCAmelCase , key=lambda __UpperCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase_ ( lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = BridgeTowerImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self : str ) ->List[str]:
"""simple docstring"""
a = BridgeTowerImageProcessingTester(self )
@property
def __lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self : List[Any] ) ->Optional[int]:
"""simple docstring"""
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''size''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''size_divisor''' ) )
def __lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : Any ) ->str:
"""simple docstring"""
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self : str ) ->Union[str, Any]:
"""simple docstring"""
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 26 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
UpperCAmelCase__ = "bart"
UpperCAmelCase__ = True
@st.cache(allow_output_mutation=a )
def _a ( ) -> Tuple:
if LOAD_DENSE_INDEX:
a = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
a = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
a = qar_model.eval()
else:
a , a = (None, None)
if MODEL_TYPE == "bart":
a = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
a = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
a = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
a = sas_model.eval()
else:
a , a = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=a )
def _a ( ) -> Dict:
if LOAD_DENSE_INDEX:
a = faiss.StandardGpuResources()
a = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
a = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
a = faiss.IndexFlatIP(128 )
a = faiss.index_cpu_to_gpu(a , 1 , a )
wikiaab_gpu_index_flat.add(a ) # TODO fix for larger GPU
else:
a , a = (None, None)
a = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=a )
def _a ( ) -> Optional[int]:
a = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
a = elia['''train_eli5''']
a = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
a = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(a )
return (elia_train, eli5_train_q_index)
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = load_indexes()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = load_models()
UpperCAmelCase__ , UpperCAmelCase__ = load_train_data()
def _a ( a :str , a :Tuple=10 ) -> List[str]:
a = embed_questions_for_retrieval([question] , a , a )
a , a = eli5_train_q_index.search(a , a )
a = [elia_train[int(a )] for i in I[0]]
return nn_examples
def _a ( a :str , a :Any="wiki40b" , a :int="dense" , a :Union[str, Any]=10 ) -> List[str]:
if source == "none":
a , a = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
a , a = query_qa_dense_index(
a , a , a , a , a , a )
else:
a , a = query_es_index(
a , a , index_name='''english_wiki40b_snippets_100w''' , n_results=a , )
a = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
a = '''question: {} context: {}'''.format(a , a )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda a : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda a : None),
} )
def _a ( a :Tuple , a :int , a :int , a :Dict=64 , a :List[Any]=256 , a :List[Any]=False , a :List[Any]=2 , a :Tuple=0.95 , a :Optional[Any]=0.8 ) -> int:
with torch.no_grad():
a = qa_sas_generate(
a , a , a , num_answers=1 , num_beams=a , min_len=a , max_len=a , do_sample=a , temp=a , top_p=a , top_k=a , max_input_length=1_024 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
UpperCAmelCase__ = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
UpperCAmelCase__ = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
UpperCAmelCase__ = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
UpperCAmelCase__ = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
UpperCAmelCase__ = st.sidebar.checkbox("Demo options")
if demo_options:
UpperCAmelCase__ = st.sidebar.selectbox(
"",
action_list,
index=3,
)
UpperCAmelCase__ = action_list.index(action_st)
UpperCAmelCase__ = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
UpperCAmelCase__ = show_type == "Show full text of passages"
else:
UpperCAmelCase__ = 3
UpperCAmelCase__ = True
UpperCAmelCase__ = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
UpperCAmelCase__ = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
UpperCAmelCase__ = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
UpperCAmelCase__ = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
UpperCAmelCase__ = "wiki40b"
UpperCAmelCase__ = "dense"
UpperCAmelCase__ = "beam"
UpperCAmelCase__ = 2
UpperCAmelCase__ = 64
UpperCAmelCase__ = 256
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = st.sidebar.checkbox("Generation options")
if generate_options:
UpperCAmelCase__ = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
UpperCAmelCase__ = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
UpperCAmelCase__ = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
UpperCAmelCase__ = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
UpperCAmelCase__ = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
UpperCAmelCase__ = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
UpperCAmelCase__ = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
UpperCAmelCase__ = None
# start main text
UpperCAmelCase__ = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
UpperCAmelCase__ = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
UpperCAmelCase__ = st.text_input("Enter your question here:", "")
else:
UpperCAmelCase__ = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
UpperCAmelCase__ , UpperCAmelCase__ = make_support(question, source=wiki_source, method="dense", n_results=10)
UpperCAmelCase__ , UpperCAmelCase__ = make_support(question, source=wiki_source, method="sparse", n_results=10)
UpperCAmelCase__ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
UpperCAmelCase__ = support_list[:10]
UpperCAmelCase__ = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
UpperCAmelCase__ , UpperCAmelCase__ = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
UpperCAmelCase__ , UpperCAmelCase__ = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
UpperCAmelCase__ = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
UpperCAmelCase__ = res[1].strip()
if sec_titles == "":
UpperCAmelCase__ = "[{}]({})".format(res[0], wiki_url)
else:
UpperCAmelCase__ = sec_titles.split(" & ")
UpperCAmelCase__ = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
UpperCAmelCase__ = find_nearest_training(question)
UpperCAmelCase__ = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
UpperCAmelCase__ = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
UpperCAmelCase__ = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 26 | 1 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_snake_case = False
try:
_snake_case = _is_package_available('''google.colab''')
except ModuleNotFoundError:
pass
@input.register
class _snake_case :
def __init__( self: List[Any] , __lowerCamelCase: str = None , __lowerCamelCase: list = [] ) -> int:
__UpperCAmelCase : int = 0
__UpperCAmelCase : int = choices
__UpperCAmelCase : List[Any] = prompt
if sys.platform == "win32":
__UpperCAmelCase : str = '''*'''
else:
__UpperCAmelCase : Union[str, Any] = '''➔ '''
def _lowerCamelCase ( self: int , __lowerCamelCase: Optional[Any] , __lowerCamelCase: str = "" ) -> Optional[int]:
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , _SCREAMING_SNAKE_CASE )
else:
forceWrite(self.choices[index] , _SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: int ) -> List[Any]:
if index == self.position:
forceWrite(f''' {self.arrow_char} ''' )
self.write_choice(_SCREAMING_SNAKE_CASE )
else:
forceWrite(f''' {self.choices[index]}''' )
reset_cursor()
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: Direction , __lowerCamelCase: int = 1 ) -> Optional[Any]:
__UpperCAmelCase : Union[str, Any] = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(_SCREAMING_SNAKE_CASE )
move_cursor(_SCREAMING_SNAKE_CASE , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def _lowerCamelCase ( self: str ) -> Optional[Any]:
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def _lowerCamelCase ( self: str ) -> int:
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def _lowerCamelCase ( self: Any ) -> Dict:
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(_SCREAMING_SNAKE_CASE )] for number in range(10 )] )
def _lowerCamelCase ( self: Dict ) -> str:
__UpperCAmelCase : Dict = int(chr(self.current_selection ) )
__UpperCAmelCase : List[str] = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , _SCREAMING_SNAKE_CASE )
else:
return
else:
return
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int = 0 ) -> Tuple:
if self.prompt:
linebreak()
forceWrite(self.prompt , "\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
__UpperCAmelCase : str = default_choice
for i in range(len(self.choices ) ):
self.print_choice(_SCREAMING_SNAKE_CASE )
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position , "UP" )
with cursor.hide():
while True:
if in_colab:
try:
__UpperCAmelCase : Optional[int] = int(builtins.input() )
except ValueError:
__UpperCAmelCase : Optional[int] = default_choice
else:
__UpperCAmelCase : str = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , "UP" )
clear_line()
self.write_choice(_SCREAMING_SNAKE_CASE , "\n" )
return choice
| 157 |
from __future__ import annotations
lowerCamelCase = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class _a :
def __init__( self : Tuple , _SCREAMING_SNAKE_CASE : dict[str, list[str]] , _SCREAMING_SNAKE_CASE : str )-> None:
lowerCAmelCase__ : List[Any] = graph
# mapping node to its parent in resulting breadth first tree
lowerCAmelCase__ : dict[str, str | None] = {}
lowerCAmelCase__ : str = source_vertex
def UpperCAmelCase__( self : str )-> None:
lowerCAmelCase__ : Dict = {self.source_vertex}
lowerCAmelCase__ : Union[str, Any] = None
lowerCAmelCase__ : List[str] = [self.source_vertex] # first in first out queue
while queue:
lowerCAmelCase__ : int = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : str = vertex
queue.append(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : List[Any] , _SCREAMING_SNAKE_CASE : str )-> str:
if target_vertex == self.source_vertex:
return self.source_vertex
lowerCAmelCase__ : str = self.parent.get(_SCREAMING_SNAKE_CASE )
if target_vertex_parent is None:
lowerCAmelCase__ : Optional[Any] = (
F'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
)
raise ValueError(_SCREAMING_SNAKE_CASE )
return self.shortest_path(_SCREAMING_SNAKE_CASE ) + F'->{target_vertex}'
if __name__ == "__main__":
lowerCamelCase = Graph(graph, '''G''')
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
| 131 | 0 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
__A =collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__A ='''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = numpy.dtype(numpy.uintaa ).newbyteorder(">" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=lowerCamelCase__ )[0]
@deprecated(lowerCamelCase__ , "Please use tf.data to implement this functionality." )
def lowerCamelCase_ ( lowerCamelCase__ ):
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=lowerCamelCase__ ) as bytestream:
lowerCamelCase_ = _readaa(lowerCamelCase__ )
if magic != 2_0_5_1:
raise ValueError(
"Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
lowerCamelCase_ = _readaa(lowerCamelCase__ )
lowerCamelCase_ = _readaa(lowerCamelCase__ )
lowerCamelCase_ = _readaa(lowerCamelCase__ )
lowerCamelCase_ = bytestream.read(rows * cols * num_images )
lowerCamelCase_ = numpy.frombuffer(lowerCamelCase__ , dtype=numpy.uinta )
lowerCamelCase_ = data.reshape(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , 1 )
return data
@deprecated(lowerCamelCase__ , "Please use tf.one_hot on tensors." )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = labels_dense.shape[0]
lowerCamelCase_ = numpy.arange(lowerCamelCase__ ) * num_classes
lowerCamelCase_ = numpy.zeros((num_labels, num_classes) )
lowerCamelCase_ = 1
return labels_one_hot
@deprecated(lowerCamelCase__ , "Please use tf.data to implement this functionality." )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=1_0 ):
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=lowerCamelCase__ ) as bytestream:
lowerCamelCase_ = _readaa(lowerCamelCase__ )
if magic != 2_0_4_9:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
lowerCamelCase_ = _readaa(lowerCamelCase__ )
lowerCamelCase_ = bytestream.read(lowerCamelCase__ )
lowerCamelCase_ = numpy.frombuffer(lowerCamelCase__ , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(lowerCamelCase__ , lowerCamelCase__ )
return labels
class _SCREAMING_SNAKE_CASE :
@deprecated(
lowercase , "Please use alternatives such as official/mnist/_DataSet.py"
" from tensorflow/models." , )
def __init__( self , lowercase , lowercase , lowercase=False , lowercase=False , lowercase=dtypes.floataa , lowercase=True , lowercase=None , ) -> Any:
lowerCamelCase_ , lowerCamelCase_ = random_seed.get_seed(lowercase )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
lowerCamelCase_ = dtypes.as_dtype(lowercase ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype )
if fake_data:
lowerCamelCase_ = 10000
lowerCamelCase_ = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'images.shape: {images.shape} labels.shape: {labels.shape}'
lowerCamelCase_ = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
lowerCamelCase_ = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
lowerCamelCase_ = images.astype(numpy.floataa )
lowerCamelCase_ = numpy.multiply(lowercase , 1.0 / 2_5_5.0 )
lowerCamelCase_ = images
lowerCamelCase_ = labels
lowerCamelCase_ = 0
lowerCamelCase_ = 0
@property
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
return self._images
@property
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
return self._labels
@property
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return self._num_examples
@property
def SCREAMING_SNAKE_CASE_( self ) -> str:
return self._epochs_completed
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase=False , lowercase=True ) -> Any:
if fake_data:
lowerCamelCase_ = [1] * 784
lowerCamelCase_ = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(lowercase )],
[fake_label for _ in range(lowercase )],
)
lowerCamelCase_ = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
lowerCamelCase_ = numpy.arange(self._num_examples )
numpy.random.shuffle(lowercase )
lowerCamelCase_ = self.images[perma]
lowerCamelCase_ = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
lowerCamelCase_ = self._num_examples - start
lowerCamelCase_ = self._images[start : self._num_examples]
lowerCamelCase_ = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
lowerCamelCase_ = numpy.arange(self._num_examples )
numpy.random.shuffle(lowercase )
lowerCamelCase_ = self.images[perm]
lowerCamelCase_ = self.labels[perm]
# Start next epoch
lowerCamelCase_ = 0
lowerCamelCase_ = batch_size - rest_num_examples
lowerCamelCase_ = self._index_in_epoch
lowerCamelCase_ = self._images[start:end]
lowerCamelCase_ = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
lowerCamelCase_ = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(lowerCamelCase__ , "Please write your own downloading logic." )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if not gfile.Exists(lowerCamelCase__ ):
gfile.MakeDirs(lowerCamelCase__ )
lowerCamelCase_ = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
if not gfile.Exists(lowerCamelCase__ ):
urllib.request.urlretrieve(lowerCamelCase__ , lowerCamelCase__ ) # noqa: S310
with gfile.GFile(lowerCamelCase__ ) as f:
lowerCamelCase_ = f.size()
print("Successfully downloaded" , lowerCamelCase__ , lowerCamelCase__ , "bytes." )
return filepath
@deprecated(
lowerCamelCase__ , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=dtypes.floataa , lowerCamelCase__=True , lowerCamelCase__=5_0_0_0 , lowerCamelCase__=None , lowerCamelCase__=DEFAULT_SOURCE_URL , ):
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=lowerCamelCase__ , one_hot=lowerCamelCase__ , dtype=lowerCamelCase__ , seed=lowerCamelCase__ )
lowerCamelCase_ = fake()
lowerCamelCase_ = fake()
lowerCamelCase_ = fake()
return _Datasets(train=lowerCamelCase__ , validation=lowerCamelCase__ , test=lowerCamelCase__ )
if not source_url: # empty string check
lowerCamelCase_ = DEFAULT_SOURCE_URL
lowerCamelCase_ = "train-images-idx3-ubyte.gz"
lowerCamelCase_ = "train-labels-idx1-ubyte.gz"
lowerCamelCase_ = "t10k-images-idx3-ubyte.gz"
lowerCamelCase_ = "t10k-labels-idx1-ubyte.gz"
lowerCamelCase_ = _maybe_download(
lowerCamelCase__ , lowerCamelCase__ , source_url + train_images_file )
with gfile.Open(lowerCamelCase__ , "rb" ) as f:
lowerCamelCase_ = _extract_images(lowerCamelCase__ )
lowerCamelCase_ = _maybe_download(
lowerCamelCase__ , lowerCamelCase__ , source_url + train_labels_file )
with gfile.Open(lowerCamelCase__ , "rb" ) as f:
lowerCamelCase_ = _extract_labels(lowerCamelCase__ , one_hot=lowerCamelCase__ )
lowerCamelCase_ = _maybe_download(
lowerCamelCase__ , lowerCamelCase__ , source_url + test_images_file )
with gfile.Open(lowerCamelCase__ , "rb" ) as f:
lowerCamelCase_ = _extract_images(lowerCamelCase__ )
lowerCamelCase_ = _maybe_download(
lowerCamelCase__ , lowerCamelCase__ , source_url + test_labels_file )
with gfile.Open(lowerCamelCase__ , "rb" ) as f:
lowerCamelCase_ = _extract_labels(lowerCamelCase__ , one_hot=lowerCamelCase__ )
if not 0 <= validation_size <= len(lowerCamelCase__ ):
lowerCamelCase_ = (
"Validation size should be between 0 and "
F'{len(lowerCamelCase__ )}. Received: {validation_size}.'
)
raise ValueError(lowerCamelCase__ )
lowerCamelCase_ = train_images[:validation_size]
lowerCamelCase_ = train_labels[:validation_size]
lowerCamelCase_ = train_images[validation_size:]
lowerCamelCase_ = train_labels[validation_size:]
lowerCamelCase_ = {"dtype": dtype, "reshape": reshape, "seed": seed}
lowerCamelCase_ = _DataSet(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
lowerCamelCase_ = _DataSet(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
lowerCamelCase_ = _DataSet(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
return _Datasets(train=lowerCamelCase__ , validation=lowerCamelCase__ , test=lowerCamelCase__ )
| 47 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
__A ={
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 47 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 'decision_transformer'
_SCREAMING_SNAKE_CASE = ['past_key_values']
_SCREAMING_SNAKE_CASE = {
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , lowercase=17 , lowercase=4 , lowercase=128 , lowercase=4_096 , lowercase=True , lowercase=1 , lowercase=1_024 , lowercase=3 , lowercase=1 , lowercase=None , lowercase="relu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=1e-5 , lowercase=0.02 , lowercase=True , lowercase=True , lowercase=50_256 , lowercase=50_256 , lowercase=False , lowercase=False , **lowercase , ) -> Tuple:
lowerCAmelCase = state_dim
lowerCAmelCase = act_dim
lowerCAmelCase = hidden_size
lowerCAmelCase = max_ep_len
lowerCAmelCase = action_tanh
lowerCAmelCase = vocab_size
lowerCAmelCase = n_positions
lowerCAmelCase = n_layer
lowerCAmelCase = n_head
lowerCAmelCase = n_inner
lowerCAmelCase = activation_function
lowerCAmelCase = resid_pdrop
lowerCAmelCase = embd_pdrop
lowerCAmelCase = attn_pdrop
lowerCAmelCase = layer_norm_epsilon
lowerCAmelCase = initializer_range
lowerCAmelCase = scale_attn_weights
lowerCAmelCase = use_cache
lowerCAmelCase = scale_attn_by_inverse_layer_idx
lowerCAmelCase = reorder_and_upcast_attn
lowerCAmelCase = bos_token_id
lowerCAmelCase = eos_token_id
super().__init__(bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
| 46 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = BartphoTokenizer
lowercase_ = False
lowercase_ = True
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Tuple:
'''simple docstring'''
super().setUp()
lowerCamelCase__: int =["▁This", "▁is", "▁a", "▁t", "est"]
lowerCamelCase__: Tuple =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
lowerCamelCase__: List[Any] ={"unk_token": "<unk>"}
lowerCamelCase__: Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"])
with open(self.monolingual_vocab_file , "w" , encoding="utf-8") as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""")
lowerCamelCase__: Dict =BartphoTokenizer(UpperCAmelCase_ , self.monolingual_vocab_file , **self.special_tokens_map)
tokenizer.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , **UpperCAmelCase_ : Optional[Any]) ->str:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[int] ="This is a là test"
lowerCamelCase__: Optional[Any] ="This is a<unk><unk> test"
return input_text, output_text
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: str =BartphoTokenizer(UpperCAmelCase_ , self.monolingual_vocab_file , **self.special_tokens_map)
lowerCamelCase__: List[Any] ="This is a là test"
lowerCamelCase__: Optional[int] ="▁This ▁is ▁a ▁l à ▁t est".split()
lowerCamelCase__: Optional[int] =tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Tuple =tokens + [tokenizer.unk_token]
lowerCamelCase__: List[Any] =[4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_)
| 10 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : int = {
'shi-labs/dinat-mini-in1k-224': 'https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class lowerCamelCase__ ( A , A ):
"""simple docstring"""
__a = """dinat"""
__a = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : List[str] , UpperCamelCase : Optional[Any]=4 , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : str=64 , UpperCamelCase : List[Any]=[3, 4, 6, 5] , UpperCamelCase : Tuple=[2, 4, 8, 16] , UpperCamelCase : Optional[int]=7 , UpperCamelCase : Union[str, Any]=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , UpperCamelCase : Dict=3.0 , UpperCamelCase : List[Any]=True , UpperCamelCase : int=0.0 , UpperCamelCase : Tuple=0.0 , UpperCamelCase : str=0.1 , UpperCamelCase : str="gelu" , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : str=1e-5 , UpperCamelCase : List[Any]=0.0 , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Tuple , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = patch_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : Union[str, Any] = embed_dim
__UpperCAmelCase : Optional[Any] = depths
__UpperCAmelCase : Optional[int] = len(UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = num_heads
__UpperCAmelCase : str = kernel_size
__UpperCAmelCase : Optional[Any] = dilations
__UpperCAmelCase : str = mlp_ratio
__UpperCAmelCase : Dict = qkv_bias
__UpperCAmelCase : List[Any] = hidden_dropout_prob
__UpperCAmelCase : str = attention_probs_dropout_prob
__UpperCAmelCase : List[str] = drop_path_rate
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : Tuple = layer_norm_eps
__UpperCAmelCase : Tuple = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__UpperCAmelCase : Any = int(embed_dim * 2 ** (len(UpperCamelCase ) - 1) )
__UpperCAmelCase : Union[str, Any] = layer_scale_init_value
__UpperCAmelCase : Union[str, Any] = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(UpperCamelCase ) + 1 )]
__UpperCAmelCase : List[str] = get_aligned_output_features_output_indices(
out_features=UpperCamelCase , out_indices=UpperCamelCase , stage_names=self.stage_names )
| 354 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase : List[str] = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Tuple = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Dict = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 320 | 0 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class _a ( _UpperCAmelCase , unittest.TestCase ):
A = CpmAntTokenizer
A = False
def __snake_case (self ) -> Tuple:
super().setUp()
UpperCAmelCase_: Dict = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
UpperCAmelCase_: int = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def __snake_case (self ) -> int:
UpperCAmelCase_: List[Any] = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
UpperCAmelCase_: Dict = """今天天气真好!"""
UpperCAmelCase_: Tuple = ["""今天""", """天气""", """真""", """好""", """!"""]
UpperCAmelCase_: Optional[int] = tokenizer.tokenize(a__ )
self.assertListEqual(a__, a__ )
UpperCAmelCase_: str = """今天天气真好!"""
UpperCAmelCase_: List[Any] = [tokenizer.bos_token] + tokens
UpperCAmelCase_: Tuple = [6, 9802, 14962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ), a__ )
UpperCAmelCase_: Any = tokenizer.decode(a__ )
self.assertEqual(a__, a__ )
| 147 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
snake_case_ = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ :
A_ : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
A_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
A_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
A_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether tp freeze the encoder.'} )
A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class SCREAMING_SNAKE_CASE__ :
A_ : str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
A_ : Optional[str] = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
A_ : Optional[int] = field(
default=1_024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A_ : Optional[int] = field(
default=128 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A_ : Optional[int] = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
A_ : Optional[int] = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A_ : Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
A_ : Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
A_ : Optional[int] = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Source language id for translation.'} )
A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Target language id for translation.'} )
A_ : Optional[int] = field(default=_UpperCAmelCase , metadata={'help': '# num_beams to use for evaluation.'} )
A_ : bool = field(
default=_UpperCAmelCase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : Dict ) -> str:
logger.info(f"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(f""" {key} = {metrics[key]}""" )
save_json(snake_case_ , os.path.join(snake_case_ , f"""{split}_results.json""" ) )
def lowerCamelCase__ ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case , __snake_case , __snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case , __snake_case , __snake_case = parser.parse_args_into_dataclasses()
check_output_dir(snake_case_ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , snake_case_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__snake_case = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(snake_case_ , snake_case_ , snake_case_ ):
assert hasattr(snake_case_ , snake_case_ ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) )
__snake_case = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__snake_case = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(snake_case_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
__snake_case = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(snake_case_ , snake_case_ ):
__snake_case = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
__snake_case = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(snake_case_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
__snake_case = SeqaSeqDataset
# Get datasets
__snake_case = (
dataset_class(
snake_case_ , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
__snake_case = (
dataset_class(
snake_case_ , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
__snake_case = (
dataset_class(
snake_case_ , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
__snake_case = (
build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None
)
__snake_case = SeqaSeqTrainer(
model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=SeqaSeqDataCollator(
snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , )
__snake_case = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
__snake_case = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
__snake_case = train_result.metrics
__snake_case = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__snake_case = trainer.evaluate(metric_key_prefix='''val''' )
__snake_case = data_args.n_val
__snake_case = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
__snake_case = trainer.predict(test_dataset=snake_case_ , metric_key_prefix='''test''' )
__snake_case = test_output.metrics
__snake_case = data_args.n_test
if trainer.is_world_process_zero():
__snake_case = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.predict_with_generate:
__snake_case = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ )
__snake_case = lmap(str.strip , snake_case_ )
write_txt_file(snake_case_ , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(snake_case_ , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def lowerCamelCase__ ( snake_case_ : Optional[Any] ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 24 | 0 |
from sklearn.metrics import matthews_corrcoef
import datasets
lowercase__ :Tuple = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
lowercase__ :Optional[Any] = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
lowercase__ :List[Any] = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def A__ ( self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''int32'''),
'''references''': datasets.Value('''int32'''),
}) ,reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] ,)
def A__ ( self ,A__ ,A__ ,A__=None):
return {
"matthews_correlation": float(matthews_corrcoef(A__ ,A__ ,sample_weight=A__)),
}
| 97 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : int =ComputeEnvironment.AMAZON_SAGEMAKER
lowercase_ : Optional[int] =True
lowercase_ : Any ='''ml.p3.2xlarge'''
lowercase_ : Any ='''accelerate_sagemaker_execution_role'''
lowercase_ : Union[str, Any] ='''hf-sm'''
lowercase_ : Any ='''us-east-1'''
lowercase_ : List[str] =1
lowercase_ : Any ='''accelerate-sagemaker-1'''
lowercase_ : Union[str, Any] ='''1.6'''
lowercase_ : Any ='''4.4'''
lowercase_ : Any ='''train.py'''
lowercase_ : int =[
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
lowercase_ : List[Any] =[
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class lowercase ( unittest.TestCase ):
def A__ ( self):
# If no defaults are changed, `to_kwargs` returns an empty dict.
lowercase = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
assert isinstance(converted_args['''model_name_or_path'''] ,A__)
assert isinstance(converted_args['''do_train'''] ,A__)
assert isinstance(converted_args['''epochs'''] ,A__)
assert isinstance(converted_args['''learning_rate'''] ,A__)
assert isinstance(converted_args['''max_steps'''] ,A__)
with pytest.raises(A__):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
| 97 | 1 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class A ( unittest.TestCase ):
@slow
def lowerCamelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] =AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
_lowerCamelCase : int =AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(__a )
from datasets import load_dataset
_lowerCamelCase : Any =load_dataset('nielsr/rvlcdip-demo' )
_lowerCamelCase : int =dataset['train'][0]['image'].convert('RGB' )
_lowerCamelCase : List[str] =image_processor(__a , return_tensors='pt' ).to(__a )
# forward pass
with torch.no_grad():
_lowerCamelCase : List[str] =model(**__a )
_lowerCamelCase : Dict =outputs.logits
_lowerCamelCase : Any =torch.Size((1, 16) )
self.assertEqual(logits.shape , __a )
_lowerCamelCase : int =torch.tensor(
[-0.4158, -0.4092, -0.4347] , device=__a , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , __a , atol=1E-4 ) )
| 199 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A : Tuple = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Dict = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
A : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57 | 0 |
def a_ ( lowerCAmelCase_ : list ):
__lowerCAmelCase = len(lowerCAmelCase_ )
for i in range(1, lowerCAmelCase_ ):
__lowerCAmelCase = collection[i]
__lowerCAmelCase = 0
__lowerCAmelCase = i - 1
while low <= high:
__lowerCAmelCase = (low + high) // 2
if val < collection[mid]:
__lowerCAmelCase = mid - 1
else:
__lowerCAmelCase = mid + 1
for j in range(lowerCAmelCase_, lowerCAmelCase_, -1 ):
__lowerCAmelCase = collection[j - 1]
__lowerCAmelCase = val
return collection
if __name__ == "__main__":
_snake_case : Union[str, Any] = input('Enter numbers separated by a comma:\n').strip()
_snake_case : Tuple = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted))
| 207 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def a_ ( lowerCAmelCase_ : Dict[str, torch.Tensor] ):
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = []
for rt in rc.restypes:
__lowerCAmelCase = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
__lowerCAmelCase = {name: i for i, name in enumerate(lowerCAmelCase_ )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
__lowerCAmelCase = torch.tensor(
lowerCAmelCase_, dtype=torch.intaa, device=protein['aatype'].device, )
__lowerCAmelCase = torch.tensor(
lowerCAmelCase_, dtype=torch.intaa, device=protein['aatype'].device, )
__lowerCAmelCase = torch.tensor(
lowerCAmelCase_, dtype=torch.floataa, device=protein['aatype'].device, )
__lowerCAmelCase = protein['aatype'].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
__lowerCAmelCase = restype_atomaa_to_atomaa[protein_aatype]
__lowerCAmelCase = restype_atomaa_mask[protein_aatype]
__lowerCAmelCase = residx_atomaa_mask
__lowerCAmelCase = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
__lowerCAmelCase = restype_atomaa_to_atomaa[protein_aatype]
__lowerCAmelCase = residx_atomaa_to_atomaa.long()
# create the corresponding mask
__lowerCAmelCase = torch.zeros([21, 37], dtype=torch.floataa, device=protein['aatype'].device )
for restype, restype_letter in enumerate(rc.restypes ):
__lowerCAmelCase = rc.restype_atoa[restype_letter]
__lowerCAmelCase = rc.residue_atoms[restype_name]
for atom_name in atom_names:
__lowerCAmelCase = rc.atom_order[atom_name]
__lowerCAmelCase = 1
__lowerCAmelCase = restype_atomaa_mask[protein_aatype]
__lowerCAmelCase = residx_atomaa_mask
return protein
def a_ ( lowerCAmelCase_ : Dict[str, torch.Tensor] ):
__lowerCAmelCase = tree_map(lambda lowerCAmelCase_ : torch.tensor(lowerCAmelCase_, device=batch['aatype'].device ), lowerCAmelCase_, np.ndarray )
__lowerCAmelCase = tensor_tree_map(lambda lowerCAmelCase_ : np.array(lowerCAmelCase_ ), make_atomaa_masks(lowerCAmelCase_ ) )
return out
| 207 | 1 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Any
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ = None ) -> List[str]:
a : Optional[Any] = value
a : Node | None = None # Added in order to delete a node easier
a : Node | None = None
a : Node | None = None
def __repr__( self ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 )
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ = None ) -> Tuple:
a : Dict = root
def __str__( self ) -> str:
return str(self.root )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
if new_children is not None: # reset its kids
a : Dict = node.parent
if node.parent is not None: # reset its parent
if self.is_right(lowerCAmelCase__ ): # If it is the right children
a : Any = new_children
else:
a : Dict = new_children
else:
a : Any = new_children
def __a ( self , lowerCAmelCase__ ) -> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def __a ( self ) -> bool:
return self.root is None
def __a ( self , lowerCAmelCase__ ) -> None:
a : int = Node(lowerCAmelCase__ ) # create a new Node
if self.empty(): # if Tree is empty
a : Union[str, Any] = new_node # set its root
else: # Tree is not empty
a : Any = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
a : Dict = new_node # We insert the new node in a leaf
break
else:
a : Optional[int] = parent_node.left
else:
if parent_node.right is None:
a : List[str] = new_node
break
else:
a : Tuple = parent_node.right
a : Union[str, Any] = parent_node
def __a ( self , *lowerCAmelCase__ ) -> None:
for value in values:
self.__insert(lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ ) -> Node | None:
if self.empty():
raise IndexError("Warning: Tree is empty! please use another." )
else:
a : Any = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
a : Tuple = node.left if value < node.value else node.right
return node
def __a ( self , lowerCAmelCase__ = None ) -> Node | None:
if node is None:
if self.root is None:
return None
a : List[str] = self.root
if not self.empty():
while node.right is not None:
a : Any = node.right
return node
def __a ( self , lowerCAmelCase__ = None ) -> Node | None:
if node is None:
a : str = self.root
if self.root is None:
return None
if not self.empty():
a : Any = self.root
while node.left is not None:
a : Dict = node.left
return node
def __a ( self , lowerCAmelCase__ ) -> None:
a : List[str] = self.search(lowerCAmelCase__ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(lowerCAmelCase__ , lowerCAmelCase__ )
elif node.left is None: # Has only right children
self.__reassign_nodes(lowerCAmelCase__ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(lowerCAmelCase__ , node.left )
else:
a : Union[str, Any] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
a : List[str] = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def __a ( self , lowerCAmelCase__ ) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def __a ( self , lowerCAmelCase__=None ) -> Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
if node:
self.inorder(lowerCAmelCase__ , node.left )
arr.append(node.value )
self.inorder(lowerCAmelCase__ , node.right )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
a : list[int] = []
self.inorder(lowerCAmelCase__ , lowerCAmelCase__ ) # append all values to list using inorder traversal
return arr[k - 1]
def _SCREAMING_SNAKE_CASE ( _lowercase : Node | None ) ->list[Node]:
'''simple docstring'''
a : List[str] = []
if curr_node is not None:
a : Dict = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def _SCREAMING_SNAKE_CASE ( ) ->None:
'''simple docstring'''
a : Dict = (8, 3, 6, 1, 10, 14, 13, 4, 7)
a : Union[str, Any] = BinarySearchTree()
for i in testlist:
t.insert(_lowercase )
# Prints all the elements of the list in order traversal
print(_lowercase )
if t.search(6 ) is not None:
print("The value 6 exists" )
else:
print("The value 6 doesn't exist" )
if t.search(-1 ) is not None:
print("The value -1 exists" )
else:
print("The value -1 doesn't exist" )
if not t.empty():
print("Max Value: " , t.get_max().value ) # type: ignore
print("Min Value: " , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(_lowercase )
print(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 105 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def UpperCAmelCase_ ( ):
lowercase = ArgumentParser(
description=(
'PyTorch TPU distributed training launch '
'helper utility that will spawn up '
'multiple distributed processes'
) )
# Optional arguments for the launch helper
parser.add_argument('--num_cores' , type=__SCREAMING_SNAKE_CASE , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
'training_script' , type=__SCREAMING_SNAKE_CASE , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
parser.add_argument('training_script_args' , nargs=__SCREAMING_SNAKE_CASE )
return parser.parse_args()
def UpperCAmelCase_ ( ):
lowercase = parse_args()
# Import training_script as a module.
lowercase = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowercase = script_fpath.stem
lowercase = importlib.import_module(__SCREAMING_SNAKE_CASE )
# Patch sys.argv
lowercase = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 195 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'tokenizer_file': 'tokenizer.json'}
__UpperCAmelCase = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Dict = VOCAB_FILES_NAMES
UpperCAmelCase_ :int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ :Any = ["input_ids", "attention_mask"]
UpperCAmelCase_ :List[Any] = None
def __init__( self , __A=None , __A=None , __A=None , __A="<unk>" , __A="<s>" , __A="</s>" , __A="<pad>" , __A=False , __A=False , **__A , ) -> Optional[int]:
super().__init__(
__A , __A , tokenizer_file=__A , unk_token=__A , bos_token=__A , eos_token=__A , pad_token=__A , add_prefix_space=__A , clean_up_tokenization_spaces=__A , **__A , )
lowerCAmelCase_ :str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __A ) != add_prefix_space:
lowerCAmelCase_ :Optional[int] = getattr(__A , pre_tok_state.pop("""type""" ) )
lowerCAmelCase_ :Union[str, Any] = add_prefix_space
lowerCAmelCase_ :Any = pre_tok_class(**__A )
lowerCAmelCase_ :Dict = add_prefix_space
def __lowerCAmelCase ( self , *__A , **__A ) -> BatchEncoding:
lowerCAmelCase_ :str = kwargs.get("""is_split_into_words""" , __A )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
""" pretokenized inputs.""" )
return super()._batch_encode_plus(*__A , **__A )
def __lowerCAmelCase ( self , *__A , **__A ) -> BatchEncoding:
lowerCAmelCase_ :int = kwargs.get("""is_split_into_words""" , __A )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
""" pretokenized inputs.""" )
return super()._encode_plus(*__A , **__A )
def __lowerCAmelCase ( self , __A , __A = None ) -> Tuple[str]:
lowerCAmelCase_ :Tuple = self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
def __lowerCAmelCase ( self , __A ) -> List[int]:
lowerCAmelCase_ :List[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__A , add_special_tokens=__A ) + [self.eos_token_id] )
if len(__A ) > self.model_max_length:
lowerCAmelCase_ :List[str] = input_ids[-self.model_max_length :]
return input_ids
| 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Any = """laion/clap-htsat-unfused"""
lowerCAmelCase_ :Optional[Any] = tempfile.mkdtemp()
def __lowerCAmelCase ( self , **__A ) -> List[Any]:
return RobertaTokenizer.from_pretrained(self.checkpoint , **__A )
def __lowerCAmelCase ( self , **__A ) -> Tuple:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__A )
def __lowerCAmelCase ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Optional[Any] = self.get_tokenizer()
lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :Optional[Any] = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Dict = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCAmelCase_ :Dict = self.get_feature_extractor(do_normalize=__A , padding_value=1.0 )
lowerCAmelCase_ :Union[str, Any] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Dict = self.get_feature_extractor()
lowerCAmelCase_ :str = self.get_tokenizer()
lowerCAmelCase_ :List[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :Optional[Any] = floats_list((3, 1000) )
lowerCAmelCase_ :Optional[Any] = feature_extractor(__A , return_tensors="""np""" )
lowerCAmelCase_ :str = processor(audios=__A , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Any = self.get_tokenizer()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :List[Any] = """This is a test string"""
lowerCAmelCase_ :Dict = processor(text=__A )
lowerCAmelCase_ :List[str] = tokenizer(__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :int = self.get_feature_extractor()
lowerCAmelCase_ :Tuple = self.get_tokenizer()
lowerCAmelCase_ :Optional[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ :Tuple = processor.batch_decode(__A )
lowerCAmelCase_ :Optional[Any] = tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Any = self.get_tokenizer()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 1 | 1 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case ( snake_case__ :Tuple , snake_case__ :Optional[Any] , snake_case__ :int=None) -> Optional[int]:
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match'''
_A = nn.Parameter(snake_case__)
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match'''
_A = nn.Parameter(snake_case__)
def snake_case ( snake_case__ :Any , snake_case__ :int , snake_case__ :Optional[Any]) -> Union[str, Any]:
# set torch weights for 1-to-1 comparison
_A = np.asarray(weights[0])
_A = np.asarray(weights[1])
_A = np.asarray(weights[2])
set_param(
torch_layer.self_attention.query_key , torch.tensor(snake_case__).transpose(1 , 2).contiguous().view(-1 , snake_case__) , )
set_param(
torch_layer.self_attention.value , torch.tensor(snake_case__).transpose(1 , 2).contiguous().view(-1 , snake_case__) , )
set_param(
torch_layer.output.dense , torch.tensor(snake_case__).view(-1 , snake_case__).contiguous().transpose(0 , 1) , )
def snake_case ( snake_case__ :Any , snake_case__ :Tuple , snake_case__ :Dict) -> Any:
# set torch weights for 1-to-1 comparison
_A = np.asarray(weights[0])
_A = np.asarray(weights[1])
_A = np.asarray(weights[2])
_A = np.asarray(weights[3])
set_param(
torch_layer.self_attention.query , torch.tensor(snake_case__).transpose(1 , 2).contiguous().view(-1 , snake_case__) , )
set_param(
torch_layer.self_attention.key , torch.tensor(snake_case__).transpose(1 , 2).contiguous().view(-1 , snake_case__) , )
set_param(
torch_layer.self_attention.value , torch.tensor(snake_case__).transpose(1 , 2).contiguous().view(-1 , snake_case__) , )
set_param(
torch_layer.output.dense , torch.tensor(snake_case__).view(-1 , snake_case__).contiguous().transpose(0 , 1) , )
def snake_case ( snake_case__ :Tuple , snake_case__ :Any , snake_case__ :Optional[Any]) -> Optional[Any]:
# layernorm 1
_A = weights[0][0][0]
_A = np.asarray(layer_norm_a[0])
_A = np.asarray(layer_norm_a[1])
set_param(
torch_block.attention.layer_norm , torch.tensor(snake_case__) , torch.tensor(snake_case__) , )
# lsh weights + output
_A = weights[0][1]
if len(snake_case__) < 4:
set_layer_weights_in_torch_lsh(snake_case__ , torch_block.attention , snake_case__)
else:
set_layer_weights_in_torch_local(snake_case__ , torch_block.attention , snake_case__)
# intermediate weighs
_A = weights[2][0][1][2]
# Chunked Feed Forward
if len(snake_case__) == 4:
_A = intermediate_weights[2]
# layernorm 2
_A = np.asarray(intermediate_weights[0][0])
_A = np.asarray(intermediate_weights[0][1])
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(snake_case__) , torch.tensor(snake_case__) , )
# intermediate dense
_A = np.asarray(intermediate_weights[1][0])
_A = np.asarray(intermediate_weights[1][1])
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(snake_case__).transpose(0 , 1).contiguous() , torch.tensor(snake_case__) , )
# intermediate out
_A = np.asarray(intermediate_weights[4][0])
_A = np.asarray(intermediate_weights[4][1])
set_param(
torch_block.feed_forward.output.dense , torch.tensor(snake_case__).transpose(0 , 1).contiguous() , torch.tensor(snake_case__) , )
def snake_case ( snake_case__ :Union[str, Any] , snake_case__ :List[str] , snake_case__ :List[Any]) -> Optional[int]:
# reformer model
_A = torch_model.reformer
# word embeds
_A = np.asarray(weights[1])
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(snake_case__) , )
if isinstance(weights[3] , snake_case__):
_A = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights)):
_A = np.asarray(weights[3][emb_idx][0])
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'''{position_embeddings[emb_idx]} emb does not match'''
_A = nn.Parameter(torch.tensor(snake_case__))
_A = weights[5]
assert len(torch_model_reformer.encoder.layers) * 4 == len(
snake_case__), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
_A = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(snake_case__ , snake_case__ , snake_case__)
# output layer norm
_A = np.asarray(weights[7][0])
_A = np.asarray(weights[7][1])
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(snake_case__) , torch.tensor(snake_case__) , )
# output embeddings
_A = np.asarray(weights[9][0])
_A = np.asarray(weights[9][1])
set_param(
torch_model.lm_head.decoder , torch.tensor(snake_case__).transpose(0 , 1).contiguous() , torch.tensor(snake_case__) , )
def snake_case ( snake_case__ :Dict , snake_case__ :Dict , snake_case__ :Union[str, Any]) -> Optional[Any]:
# Initialise PyTorch model
_A = ReformerConfig.from_json_file(snake_case__)
print(F'''Building PyTorch model from configuration: {config}''')
_A = ReformerModelWithLMHead(snake_case__)
with open(snake_case__ , """rb""") as f:
_A = pickle.load(snake_case__)["""weights"""]
set_model_weights_in_torch(snake_case__ , snake_case__ , config.hidden_size)
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''')
torch.save(model.state_dict() , snake_case__)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 180 | from math import isqrt, loga
def snake_case ( snake_case__ :int) -> list[int]:
_A = [True] * max_number
for i in range(2 , isqrt(max_number - 1) + 1):
if is_prime[i]:
for j in range(i**2 , snake_case__ , snake_case__):
_A = False
return [i for i in range(2 , snake_case__) if is_prime[i]]
def snake_case ( snake_case__ :int = 800_800 , snake_case__ :int = 800_800) -> int:
_A = degree * loga(snake_case__)
_A = int(snake_case__)
_A = calculate_prime_numbers(snake_case__)
_A = 0
_A = 0
_A = len(snake_case__) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left])
+ prime_numbers[left] * loga(prime_numbers[right])
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 180 | 1 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def __lowercase ( _a ):
snake_case_, snake_case_ : Dict = np.shape(_a )
if rows != columns:
snake_case_ : Optional[int] = (
'''\'table\' has to be of square shaped array but got a '''
f"{rows}x{columns} array:\n{table}"
)
raise ValueError(_a )
snake_case_ : str = np.zeros((rows, columns) )
snake_case_ : Optional[Any] = np.zeros((rows, columns) )
for i in range(_a ):
for j in range(_a ):
snake_case_ : List[Any] = sum(lower[i][k] * upper[k][j] for k in range(_a ) )
if upper[j][j] == 0:
raise ArithmeticError('''No LU decomposition exists''' )
snake_case_ : Any = (table[i][j] - total) / upper[j][j]
snake_case_ : Tuple = 1
for j in range(_a , _a ):
snake_case_ : Optional[Any] = sum(lower[i][k] * upper[k][j] for k in range(_a ) )
snake_case_ : Optional[Any] = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 155 |
"""simple docstring"""
def __lowercase ( _a = 4_000_000 ):
snake_case_ : Dict = []
snake_case_, snake_case_ : List[str] = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(_a )
snake_case_, snake_case_ : str = b, a + b
return sum(_a )
if __name__ == "__main__":
print(f'{solution() = }')
| 155 | 1 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'kwargs, expected' , [
({'num_shards': 0, 'max_num_jobs': 1}, []),
({'num_shards': 10, 'max_num_jobs': 1}, [range(10 )]),
({'num_shards': 10, 'max_num_jobs': 10}, [range(lowerCamelCase_ , i + 1 ) for i in range(10 )]),
({'num_shards': 1, 'max_num_jobs': 10}, [range(1 )]),
({'num_shards': 10, 'max_num_jobs': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'num_shards': 3, 'max_num_jobs': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : List[Any] = _distribute_shards(**lowerCamelCase_ )
assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, max_num_jobs, expected' , [
({'foo': 0}, 10, [{'foo': 0}]),
({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
] , )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_lowercase : int = _split_gen_kwargs(lowerCamelCase_ , lowerCamelCase_ )
assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, expected' , [
({'foo': 0}, 1),
({'shards': [0]}, 1),
({'shards': [0, 1, 2, 3]}, 4),
({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
] , )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Tuple:
if expected is RuntimeError:
with pytest.raises(lowerCamelCase_ ):
_number_of_shards_in_gen_kwargs(lowerCamelCase_ )
else:
_lowercase : Tuple = _number_of_shards_in_gen_kwargs(lowerCamelCase_ )
assert out == expected
| 21 | import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def __lowercase ( lowerCamelCase : str , lowerCamelCase : str , **lowerCamelCase : List[Any] ):
UpperCamelCase_ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase , **lowerCamelCase )
UpperCamelCase_ : str = AutoModelForSeqaSeqLM.from_config(lowerCamelCase )
model.save_pretrained(lowerCamelCase )
AutoTokenizer.from_pretrained(lowerCamelCase ).save_pretrained(lowerCamelCase )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 175 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class lowerCAmelCase_ ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = 'sew'
def __init__( self : str , _UpperCAmelCase : Tuple=32 , _UpperCAmelCase : List[str]=7_68 , _UpperCAmelCase : List[Any]=12 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Any=30_72 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : List[Any]="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : Dict=0.1 , _UpperCAmelCase : int=0.0 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Tuple=1E-5 , _UpperCAmelCase : Dict="group" , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : List[Any]=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , _UpperCAmelCase : Union[str, Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _UpperCAmelCase : Tuple=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _UpperCAmelCase : str=False , _UpperCAmelCase : Tuple=1_28 , _UpperCAmelCase : str=16 , _UpperCAmelCase : int=True , _UpperCAmelCase : List[str]=0.05 , _UpperCAmelCase : str=10 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : Union[str, Any]=0.0 , _UpperCAmelCase : Optional[int]=10 , _UpperCAmelCase : str=0 , _UpperCAmelCase : Optional[int]="mean" , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : List[str]=False , _UpperCAmelCase : Optional[Any]=2_56 , _UpperCAmelCase : List[str]=0 , _UpperCAmelCase : Dict=1 , _UpperCAmelCase : Optional[int]=2 , **_UpperCAmelCase : List[str] , ):
"""simple docstring"""
super().__init__(**_UpperCamelCase , pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase )
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = feat_extract_norm
UpperCAmelCase__ = feat_extract_activation
UpperCAmelCase__ = list(_UpperCamelCase )
UpperCAmelCase__ = list(_UpperCamelCase )
UpperCAmelCase__ = list(_UpperCamelCase )
UpperCAmelCase__ = conv_bias
UpperCAmelCase__ = num_conv_pos_embeddings
UpperCAmelCase__ = num_conv_pos_embedding_groups
UpperCAmelCase__ = len(self.conv_dim )
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = squeeze_factor
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = hidden_dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = activation_dropout
UpperCAmelCase__ = feat_proj_dropout
UpperCAmelCase__ = final_dropout
UpperCAmelCase__ = layerdrop
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase__ = apply_spec_augment
UpperCAmelCase__ = mask_time_prob
UpperCAmelCase__ = mask_time_length
UpperCAmelCase__ = mask_time_min_masks
UpperCAmelCase__ = mask_feature_prob
UpperCAmelCase__ = mask_feature_length
UpperCAmelCase__ = mask_feature_min_masks
# ctc loss
UpperCAmelCase__ = ctc_loss_reduction
UpperCAmelCase__ = ctc_zero_infinity
# sequence classification
UpperCAmelCase__ = use_weighted_layer_sum
UpperCAmelCase__ = classifier_proj_size
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 358 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ = s.rsplit(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return new.join(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ = {}
UpperCAmelCase__ = ["""group_1""", """group_2""", """group_3""", """group_4"""]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
UpperCAmelCase__ = key.replace(F'''{group_key}.''' , F'''{group_key}.group.''' )
if "res_path" in key:
UpperCAmelCase__ = key.replace("""res_path.""" , """res_path.path.""" )
if key.endswith(""".w""" ):
UpperCAmelCase__ = rreplace(SCREAMING_SNAKE_CASE__ , """.w""" , """.weight""" , 1 )
if key.endswith(""".b""" ):
UpperCAmelCase__ = rreplace(SCREAMING_SNAKE_CASE__ , """.b""" , """.bias""" , 1 )
UpperCAmelCase__ = value.float()
return upgrade
@torch.no_grad()
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : List[Any]=True ):
'''simple docstring'''
from dall_e import Encoder
UpperCAmelCase__ = Encoder()
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase__ = torch.load(SCREAMING_SNAKE_CASE__ )
else:
UpperCAmelCase__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase__ = ckpt.state_dict()
encoder.load_state_dict(SCREAMING_SNAKE_CASE__ )
if config_path is not None:
UpperCAmelCase__ = FlavaImageCodebookConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
else:
UpperCAmelCase__ = FlavaImageCodebookConfig()
UpperCAmelCase__ = FlavaImageCodebook(SCREAMING_SNAKE_CASE__ ).eval()
UpperCAmelCase__ = encoder.state_dict()
UpperCAmelCase__ = upgrade_state_dict(SCREAMING_SNAKE_CASE__ )
hf_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = hf_model.state_dict()
UpperCAmelCase__ = count_parameters(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = count_parameters(SCREAMING_SNAKE_CASE__ )
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
else:
return hf_state_dict
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
UpperCAmelCase_ = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 61 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
},
"tokenizer_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
},
}
_snake_case = {
"google/fnet-base": 512,
"google/fnet-large": 512,
}
_snake_case = "▁"
class lowercase ( UpperCamelCase__ ):
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ["input_ids", "token_type_ids"]
_a = FNetTokenizer
def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=True , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , **_a , ) -> Optional[int]:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_A : int = (
AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a )
if isinstance(_a , _a )
else mask_token
)
super().__init__(
_a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , **_a , )
_A : Optional[int] = do_lower_case
_A : List[Any] = remove_space
_A : str = keep_accents
_A : int = vocab_file
_A : int = False if not self.vocab_file else True
def a__ ( self , _a , _a = None ) -> List[int]:
_A : str = [self.sep_token_id]
_A : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a__ ( self , _a , _a = None ) -> List[int]:
_A : Any = [self.sep_token_id]
_A : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__ ( self , _a , _a = None ) -> Tuple[str]:
if not os.path.isdir(_a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A : List[str] = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
| 26 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : Any = "▁"
_lowerCAmelCase : Optional[Any] = {"vocab_file": "spiece.model"}
_lowerCAmelCase : List[Any] = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
_lowerCAmelCase : Union[str, Any] = {
"google/pegasus-xsum": 512,
}
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
def __init__( self , __snake_case , __snake_case="<pad>" , __snake_case="</s>" , __snake_case="<unk>" , __snake_case="<mask_2>" , __snake_case="<mask_1>" , __snake_case=None , __snake_case=103 , __snake_case = None , **__snake_case , ) -> None:
'''simple docstring'''
__a =offset
if additional_special_tokens is not None:
if not isinstance(__snake_case , __snake_case ):
raise TypeError(
f'additional_special_tokens should be of type {type(__snake_case )}, but is'
f' {type(__snake_case )}' )
__a =(
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'<unk_{i}>' for i in range(len(__snake_case ) , self.offset - 1 )
]
if len(set(__snake_case ) ) != len(__snake_case ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
__a =additional_special_tokens_extended
else:
__a =[mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset )]
__a ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__snake_case , unk_token=__snake_case , mask_token=__snake_case , pad_token=__snake_case , mask_token_sent=__snake_case , offset=__snake_case , additional_special_tokens=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
__a =mask_token_sent
__a =vocab_file
__a =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__snake_case )
# add special tokens to encoder dict
__a ={
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
__a ={v: k for k, v in self.encoder.items()}
@property
def __magic_name__ ( self ) -> int:
'''simple docstring'''
return len(self.sp_model ) + self.offset
def __magic_name__ ( self ) -> Dict[str, int]:
'''simple docstring'''
__a ={self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[Any]:
'''simple docstring'''
__a =self.__dict__.copy()
__a =None
return state
def __setstate__( self , __snake_case ) -> str:
'''simple docstring'''
__a =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__a ={}
__a =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __magic_name__ ( self , __snake_case ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(__snake_case , out_type=__snake_case )
def __magic_name__ ( self , __snake_case ) -> int:
'''simple docstring'''
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
__a =self.sp_model.piece_to_id(__snake_case )
return sp_id + self.offset
def __magic_name__ ( self , __snake_case ) -> str:
'''simple docstring'''
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
__a =self.sp_model.IdToPiece(index - self.offset )
return token
def __magic_name__ ( self , __snake_case ) -> Any:
'''simple docstring'''
__a =[]
__a =''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__snake_case ) + token
__a =[]
else:
current_sub_tokens.append(__snake_case )
out_string += self.sp_model.decode(__snake_case )
return out_string.strip()
def __magic_name__ ( self , __snake_case=False ) -> Union[str, Any]:
'''simple docstring'''
return 1
def __magic_name__ ( self , __snake_case ) -> Optional[int]:
'''simple docstring'''
__a =set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def __magic_name__ ( self , __snake_case , __snake_case = None , __snake_case = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(__snake_case )
elif token_ids_a is None:
return self._special_token_mask(__snake_case ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def __magic_name__ ( self , __snake_case , __snake_case=None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __magic_name__ ( self , __snake_case , __snake_case = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__snake_case ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__a =os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(__snake_case , 'wb' ) as fi:
__a =self.sp_model.serialized_model_proto()
fi.write(__snake_case )
return (out_vocab_file,)
| 308 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =[]
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =resnets
__a =attentions
if self.add_downsample:
__a =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> Optional[Any]:
'''simple docstring'''
__a =()
for resnet, attn in zip(self.resnets , self.attentions ):
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
__a =self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
if self.add_downsample:
__a =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case=True ) -> Optional[int]:
'''simple docstring'''
__a =()
for resnet in self.resnets:
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
__a =self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =[]
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if (i == self.num_layers - 1) else self.out_channels
__a =self.prev_output_channel if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =resnets
__a =attentions
if self.add_upsample:
__a =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[Any]:
'''simple docstring'''
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__a =res_hidden_states_tuple[-1]
__a =res_hidden_states_tuple[:-1]
__a =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
__a =self.upsamplers_a(__snake_case )
return hidden_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =[]
for i in range(self.num_layers ):
__a =self.in_channels if (i == self.num_layers - 1) else self.out_channels
__a =self.prev_output_channel if i == 0 else self.out_channels
__a =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
if self.add_upsample:
__a =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[Any]:
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
__a =res_hidden_states_tuple[-1]
__a =res_hidden_states_tuple[:-1]
__a =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
__a =self.upsamplers_a(__snake_case )
return hidden_states
class __magic_name__ ( nn.Module ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = jnp.floataa
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
# there is always at least one resnet
__a =[
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__a =[]
for _ in range(self.num_layers ):
__a =FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
__a =FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
__a =resnets
__a =attentions
def __call__( self , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> List[str]:
'''simple docstring'''
__a =self.resnets[0](__snake_case , __snake_case )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__a =attn(__snake_case , __snake_case , deterministic=__snake_case )
__a =resnet(__snake_case , __snake_case , deterministic=__snake_case )
return hidden_states
| 308 | 1 |
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : str ) -> Union[str, Any]:
"""simple docstring"""
def get_masked_lm_array(_UpperCamelCase : str ):
_SCREAMING_SNAKE_CASE =f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
_SCREAMING_SNAKE_CASE =tf.train.load_variable(_UpperCamelCase , _UpperCamelCase )
if "kernel" in name:
_SCREAMING_SNAKE_CASE =array.transpose()
return torch.from_numpy(_UpperCamelCase )
def get_encoder_array(_UpperCamelCase : str ):
_SCREAMING_SNAKE_CASE =f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
_SCREAMING_SNAKE_CASE =tf.train.load_variable(_UpperCamelCase , _UpperCamelCase )
if "kernel" in name:
_SCREAMING_SNAKE_CASE =array.transpose()
return torch.from_numpy(_UpperCamelCase )
def get_encoder_layer_array(_UpperCamelCase : int , _UpperCamelCase : str ):
_SCREAMING_SNAKE_CASE =f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
_SCREAMING_SNAKE_CASE =tf.train.load_variable(_UpperCamelCase , _UpperCamelCase )
if "kernel" in name:
_SCREAMING_SNAKE_CASE =array.transpose()
return torch.from_numpy(_UpperCamelCase )
def get_encoder_attention_layer_array(_UpperCamelCase : int , _UpperCamelCase : str , _UpperCamelCase : str ):
_SCREAMING_SNAKE_CASE =f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
_SCREAMING_SNAKE_CASE =tf.train.load_variable(_UpperCamelCase , _UpperCamelCase )
_SCREAMING_SNAKE_CASE =array.reshape(_UpperCamelCase )
if "kernel" in name:
_SCREAMING_SNAKE_CASE =array.transpose()
return torch.from_numpy(_UpperCamelCase )
print(f"Loading model based on config from {config_path}..." )
_SCREAMING_SNAKE_CASE =BertConfig.from_json_file(_UpperCamelCase )
_SCREAMING_SNAKE_CASE =BertForMaskedLM(_UpperCamelCase )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
_SCREAMING_SNAKE_CASE =model.bert.encoder.layer[layer_index]
# Self-attention
_SCREAMING_SNAKE_CASE =layer.attention.self
_SCREAMING_SNAKE_CASE =get_encoder_attention_layer_array(
_UpperCamelCase , '_query_dense/kernel' , self_attn.query.weight.data.shape )
_SCREAMING_SNAKE_CASE =get_encoder_attention_layer_array(
_UpperCamelCase , '_query_dense/bias' , self_attn.query.bias.data.shape )
_SCREAMING_SNAKE_CASE =get_encoder_attention_layer_array(
_UpperCamelCase , '_key_dense/kernel' , self_attn.key.weight.data.shape )
_SCREAMING_SNAKE_CASE =get_encoder_attention_layer_array(
_UpperCamelCase , '_key_dense/bias' , self_attn.key.bias.data.shape )
_SCREAMING_SNAKE_CASE =get_encoder_attention_layer_array(
_UpperCamelCase , '_value_dense/kernel' , self_attn.value.weight.data.shape )
_SCREAMING_SNAKE_CASE =get_encoder_attention_layer_array(
_UpperCamelCase , '_value_dense/bias' , self_attn.value.bias.data.shape )
# Self-attention Output
_SCREAMING_SNAKE_CASE =layer.attention.output
_SCREAMING_SNAKE_CASE =get_encoder_attention_layer_array(
_UpperCamelCase , '_output_dense/kernel' , self_output.dense.weight.data.shape )
_SCREAMING_SNAKE_CASE =get_encoder_attention_layer_array(
_UpperCamelCase , '_output_dense/bias' , self_output.dense.bias.data.shape )
_SCREAMING_SNAKE_CASE =get_encoder_layer_array(_UpperCamelCase , '_attention_layer_norm/gamma' )
_SCREAMING_SNAKE_CASE =get_encoder_layer_array(_UpperCamelCase , '_attention_layer_norm/beta' )
# Intermediate
_SCREAMING_SNAKE_CASE =layer.intermediate
_SCREAMING_SNAKE_CASE =get_encoder_layer_array(_UpperCamelCase , '_intermediate_dense/kernel' )
_SCREAMING_SNAKE_CASE =get_encoder_layer_array(_UpperCamelCase , '_intermediate_dense/bias' )
# Output
_SCREAMING_SNAKE_CASE =layer.output
_SCREAMING_SNAKE_CASE =get_encoder_layer_array(_UpperCamelCase , '_output_dense/kernel' )
_SCREAMING_SNAKE_CASE =get_encoder_layer_array(_UpperCamelCase , '_output_dense/bias' )
_SCREAMING_SNAKE_CASE =get_encoder_layer_array(_UpperCamelCase , '_output_layer_norm/gamma' )
_SCREAMING_SNAKE_CASE =get_encoder_layer_array(_UpperCamelCase , '_output_layer_norm/beta' )
# Embeddings
_SCREAMING_SNAKE_CASE =get_encoder_array('_position_embedding_layer/embeddings' )
_SCREAMING_SNAKE_CASE =get_encoder_array('_type_embedding_layer/embeddings' )
_SCREAMING_SNAKE_CASE =get_encoder_array('_embedding_norm_layer/gamma' )
_SCREAMING_SNAKE_CASE =get_encoder_array('_embedding_norm_layer/beta' )
# LM Head
_SCREAMING_SNAKE_CASE =model.cls.predictions.transform
_SCREAMING_SNAKE_CASE =get_masked_lm_array('dense/kernel' )
_SCREAMING_SNAKE_CASE =get_masked_lm_array('dense/bias' )
_SCREAMING_SNAKE_CASE =get_masked_lm_array('layer_norm/gamma' )
_SCREAMING_SNAKE_CASE =get_masked_lm_array('layer_norm/beta' )
_SCREAMING_SNAKE_CASE =get_masked_lm_array('embedding_table' )
# Pooling
_SCREAMING_SNAKE_CASE =BertPooler(config=_UpperCamelCase )
_SCREAMING_SNAKE_CASE =get_encoder_array('_pooler_layer/kernel' )
_SCREAMING_SNAKE_CASE =get_encoder_array('_pooler_layer/bias' )
# Export final model
model.save_pretrained(_UpperCamelCase )
# Integration test - should load without any errors ;)
_SCREAMING_SNAKE_CASE =BertForMaskedLM.from_pretrained(_UpperCamelCase )
print(new_model.eval() )
print('Model conversion was done sucessfully!' )
if __name__ == "__main__":
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
lowerCamelCase : Union[str, Any] = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 47 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : Any = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[Any] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Union[str, Any] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 47 | 1 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__snake_case : List[str] = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self: List[str] , **_SCREAMING_SNAKE_CASE: Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE)
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""")
requires_backends(self , "vision")
self.check_model_type(_SCREAMING_SNAKE_CASE)
def __call__( self: str , _SCREAMING_SNAKE_CASE: Union[str, "Image.Image", List[Dict[str, Any]]] , _SCREAMING_SNAKE_CASE: Union[str, List[str]] = None , **_SCREAMING_SNAKE_CASE: Optional[Any] , ) -> int:
"""simple docstring"""
if "text_queries" in kwargs:
__lowerCAmelCase : List[str] = kwargs.pop("text_queries")
if isinstance(_SCREAMING_SNAKE_CASE , (str, Image.Image)):
__lowerCAmelCase : Any = {"image": image, "candidate_labels": candidate_labels}
else:
__lowerCAmelCase : Dict = image
__lowerCAmelCase : Optional[int] = super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
return results
def _SCREAMING_SNAKE_CASE ( self: Any , **_SCREAMING_SNAKE_CASE: Tuple) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = {}
if "threshold" in kwargs:
__lowerCAmelCase : Optional[int] = kwargs["threshold"]
if "top_k" in kwargs:
__lowerCAmelCase : int = kwargs["top_k"]
return {}, {}, postprocess_params
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: Dict) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : List[str] = load_image(inputs["image"])
__lowerCAmelCase : Union[str, Any] = inputs["candidate_labels"]
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : Optional[int] = candidate_labels.split(",")
__lowerCAmelCase : Union[str, Any] = torch.tensor([[image.height, image.width]] , dtype=torch.intaa)
for i, candidate_label in enumerate(_SCREAMING_SNAKE_CASE):
__lowerCAmelCase : Optional[Any] = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=self.framework)
__lowerCAmelCase : Dict = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=self.framework)
yield {
"is_last": i == len(_SCREAMING_SNAKE_CASE) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[str]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = model_inputs.pop("target_size")
__lowerCAmelCase : Any = model_inputs.pop("candidate_label")
__lowerCAmelCase : List[str] = model_inputs.pop("is_last")
__lowerCAmelCase : Dict = self.model(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: str=0.1 , _SCREAMING_SNAKE_CASE: Optional[int]=None) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : int = []
for model_output in model_outputs:
__lowerCAmelCase : Dict = model_output["candidate_label"]
__lowerCAmelCase : int = BaseModelOutput(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = self.image_processor.post_process_object_detection(
outputs=_SCREAMING_SNAKE_CASE , threshold=_SCREAMING_SNAKE_CASE , target_sizes=model_output["target_size"])[0]
for index in outputs["scores"].nonzero():
__lowerCAmelCase : Any = outputs["scores"][index].item()
__lowerCAmelCase : int = self._get_bounding_box(outputs["boxes"][index][0])
__lowerCAmelCase : List[str] = {"score": score, "label": label, "box": box}
results.append(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE: x["score"] , reverse=_SCREAMING_SNAKE_CASE)
if top_k:
__lowerCAmelCase : str = results[:top_k]
return results
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: "torch.Tensor") -> Dict[str, int]:
"""simple docstring"""
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = box.int().tolist()
__lowerCAmelCase : Any = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox | 58 |
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__snake_case : str = logging.get_logger(__name__)
# General docstring
__snake_case : Optional[int] = 'PoolFormerConfig'
# Base docstring
__snake_case : Any = 'sail/poolformer_s12'
__snake_case : Optional[Any] = [1, 512, 7, 7]
# Image classification docstring
__snake_case : List[Any] = 'sail/poolformer_s12'
__snake_case : Optional[Any] = 'tabby, tabby cat'
__snake_case : Union[str, Any] = [
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def _lowercase ( __snake_case ,__snake_case = 0.0 ,__snake_case = False ) -> Tuple:
if drop_prob == 0.0 or not training:
return input
__lowerCAmelCase : Optional[int] = 1 - drop_prob
__lowerCAmelCase : Union[str, Any] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
__lowerCAmelCase : List[str] = keep_prob + torch.rand(__snake_case ,dtype=input.dtype ,device=input.device )
random_tensor.floor_() # binarize
__lowerCAmelCase : Tuple = input.div(__snake_case ) * random_tensor
return output
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[float] = None) -> None:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : Dict = drop_prob
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: torch.Tensor) -> torch.Tensor:
"""simple docstring"""
return drop_path(_SCREAMING_SNAKE_CASE , self.drop_prob , self.training)
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> str:
"""simple docstring"""
return "p={}".format(self.drop_prob)
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Any=None) -> int:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : Optional[int] = patch_size if isinstance(_SCREAMING_SNAKE_CASE , collections.abc.Iterable) else (patch_size, patch_size)
__lowerCAmelCase : Any = stride if isinstance(_SCREAMING_SNAKE_CASE , collections.abc.Iterable) else (stride, stride)
__lowerCAmelCase : Any = padding if isinstance(_SCREAMING_SNAKE_CASE , collections.abc.Iterable) else (padding, padding)
__lowerCAmelCase : Optional[int] = nn.Convad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , kernel_size=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = norm_layer(_SCREAMING_SNAKE_CASE) if norm_layer else nn.Identity()
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[int]) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : str = self.projection(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = self.norm(_SCREAMING_SNAKE_CASE)
return embeddings
class A__ ( nn.GroupNorm ):
'''simple docstring'''
def __init__( self: str , _SCREAMING_SNAKE_CASE: List[Any] , **_SCREAMING_SNAKE_CASE: List[Any]) -> Tuple:
"""simple docstring"""
super().__init__(1 , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self: int , _SCREAMING_SNAKE_CASE: str) -> Dict:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : Dict = nn.AvgPoolad(_SCREAMING_SNAKE_CASE , stride=1 , padding=pool_size // 2 , count_include_pad=_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Dict) -> Dict:
"""simple docstring"""
return self.pool(_SCREAMING_SNAKE_CASE) - hidden_states
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: str) -> Dict:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : Dict = nn.Convad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1)
__lowerCAmelCase : Tuple = nn.Convad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1)
__lowerCAmelCase : Any = PoolFormerDropPath(_SCREAMING_SNAKE_CASE)
if isinstance(config.hidden_act , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : Optional[int] = ACTaFN[config.hidden_act]
else:
__lowerCAmelCase : int = config.hidden_act
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: int) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : int = self.conva(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = self.act_fn(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = self.drop(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = self.conva(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = self.drop(_SCREAMING_SNAKE_CASE)
return hidden_states
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any]) -> str:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : List[str] = PoolFormerPooling(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : int = PoolFormerOutput(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = PoolFormerGroupNorm(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = PoolFormerGroupNorm(_SCREAMING_SNAKE_CASE)
# Useful for training neural nets
__lowerCAmelCase : Optional[int] = PoolFormerDropPath(_SCREAMING_SNAKE_CASE) if drop_path > 0.0 else nn.Identity()
__lowerCAmelCase : Union[str, Any] = config.use_layer_scale
if config.use_layer_scale:
__lowerCAmelCase : List[Any] = nn.Parameter(
config.layer_scale_init_value * torch.ones((_SCREAMING_SNAKE_CASE)) , requires_grad=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = nn.Parameter(
config.layer_scale_init_value * torch.ones((_SCREAMING_SNAKE_CASE)) , requires_grad=_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> Optional[int]:
"""simple docstring"""
if self.use_layer_scale:
__lowerCAmelCase : int = self.pooling(self.before_norm(_SCREAMING_SNAKE_CASE))
__lowerCAmelCase : List[str] = self.layer_scale_a.unsqueeze(-1).unsqueeze(-1) * pooling_output
# First residual connection
__lowerCAmelCase : Optional[Any] = hidden_states + self.drop_path(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = ()
__lowerCAmelCase : Union[str, Any] = self.output(self.after_norm(_SCREAMING_SNAKE_CASE))
__lowerCAmelCase : Dict = self.layer_scale_a.unsqueeze(-1).unsqueeze(-1) * layer_output
# Second residual connection
__lowerCAmelCase : List[str] = hidden_states + self.drop_path(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = (output,) + outputs
return outputs
else:
__lowerCAmelCase : Optional[Any] = self.drop_path(self.pooling(self.before_norm(_SCREAMING_SNAKE_CASE)))
# First residual connection
__lowerCAmelCase : Optional[Any] = pooling_output + hidden_states
__lowerCAmelCase : List[Any] = ()
# Second residual connection inside the PoolFormerOutput block
__lowerCAmelCase : Any = self.drop_path(self.output(self.after_norm(_SCREAMING_SNAKE_CASE)))
__lowerCAmelCase : str = hidden_states + layer_output
__lowerCAmelCase : List[Any] = (output,) + outputs
return outputs
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any]) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : Optional[int] = config
# stochastic depth decay rule
__lowerCAmelCase : Tuple = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths))]
# patch embeddings
__lowerCAmelCase : List[str] = []
for i in range(config.num_encoder_blocks):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ))
__lowerCAmelCase : Tuple = nn.ModuleList(_SCREAMING_SNAKE_CASE)
# Transformer blocks
__lowerCAmelCase : Union[str, Any] = []
__lowerCAmelCase : Any = 0
for i in range(config.num_encoder_blocks):
# each block consists of layers
__lowerCAmelCase : List[Any] = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i]):
layers.append(
PoolFormerLayer(
_SCREAMING_SNAKE_CASE , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio) , drop_path=dpr[cur + j] , ))
blocks.append(nn.ModuleList(_SCREAMING_SNAKE_CASE))
__lowerCAmelCase : Union[str, Any] = nn.ModuleList(_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: str=False , _SCREAMING_SNAKE_CASE: Union[str, Any]=True) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Dict = () if output_hidden_states else None
__lowerCAmelCase : Union[str, Any] = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block)):
__lowerCAmelCase , __lowerCAmelCase : str = layers
# Get patch embeddings from hidden_states
__lowerCAmelCase : str = embedding_layer(_SCREAMING_SNAKE_CASE)
# Send the embeddings through the blocks
for _, blk in enumerate(_SCREAMING_SNAKE_CASE):
__lowerCAmelCase : int = blk(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = layer_outputs[0]
if output_hidden_states:
__lowerCAmelCase : int = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(last_hidden_state=_SCREAMING_SNAKE_CASE , hidden_states=_SCREAMING_SNAKE_CASE)
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = PoolFormerConfig
SCREAMING_SNAKE_CASE = 'poolformer'
SCREAMING_SNAKE_CASE = 'pixel_values'
SCREAMING_SNAKE_CASE = True
def _SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: List[Any]) -> List[str]:
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , (nn.Linear, nn.Convad)):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_SCREAMING_SNAKE_CASE , nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any]=False) -> Dict:
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : List[Any] = value
__snake_case : Union[str, Any] = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__snake_case : str = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' , __SCREAMING_SNAKE_CASE , )
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self: str , _SCREAMING_SNAKE_CASE: Optional[int]) -> Any:
"""simple docstring"""
super().__init__(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = config
__lowerCAmelCase : Any = PoolFormerEncoder(_SCREAMING_SNAKE_CASE)
# Initialize weights and apply final processing
self.post_init()
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: Optional[torch.FloatTensor] = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
"""simple docstring"""
__lowerCAmelCase : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase : str = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
__lowerCAmelCase : Union[str, Any] = self.encoder(
_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=_SCREAMING_SNAKE_CASE , hidden_states=encoder_outputs.hidden_states , )
class A__ ( nn.Module ):
'''simple docstring'''
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: Tuple) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
__lowerCAmelCase : List[Any] = nn.Linear(config.hidden_size , config.hidden_size)
def _SCREAMING_SNAKE_CASE ( self: List[str] , _SCREAMING_SNAKE_CASE: Optional[Any]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = self.dense(_SCREAMING_SNAKE_CASE)
return output
@add_start_docstrings(
'\n PoolFormer Model transformer with an image classification head on top\n ' , __SCREAMING_SNAKE_CASE , )
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any]) -> Dict:
"""simple docstring"""
super().__init__(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = config.num_labels
__lowerCAmelCase : Tuple = PoolFormerModel(_SCREAMING_SNAKE_CASE)
# Final norm
__lowerCAmelCase : Optional[Any] = PoolFormerGroupNorm(config.hidden_sizes[-1])
# Classifier head
__lowerCAmelCase : Any = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[torch.FloatTensor] = None , _SCREAMING_SNAKE_CASE: Optional[torch.LongTensor] = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
"""simple docstring"""
__lowerCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase : Union[str, Any] = self.poolformer(
_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = outputs[0]
__lowerCAmelCase : Optional[int] = self.classifier(self.norm(_SCREAMING_SNAKE_CASE).mean([-2, -1]))
__lowerCAmelCase : Tuple = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__lowerCAmelCase : int = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__lowerCAmelCase : List[Any] = "single_label_classification"
else:
__lowerCAmelCase : Union[str, Any] = "multi_label_classification"
if self.config.problem_type == "regression":
__lowerCAmelCase : Dict = MSELoss()
if self.num_labels == 1:
__lowerCAmelCase : Optional[int] = loss_fct(logits.squeeze() , labels.squeeze())
else:
__lowerCAmelCase : int = loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
elif self.config.problem_type == "single_label_classification":
__lowerCAmelCase : int = CrossEntropyLoss()
__lowerCAmelCase : str = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
__lowerCAmelCase : Union[str, Any] = BCEWithLogitsLoss()
__lowerCAmelCase : Optional[int] = loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
if not return_dict:
__lowerCAmelCase : List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_SCREAMING_SNAKE_CASE , logits=_SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states) | 58 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ : Optional[int] = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Optional[int] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
UpperCamelCase__ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : str = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Any = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Dict = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Tuple = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Any = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(cls , ['''flax'''] ) | 320 | 0 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def _a ( UpperCAmelCase , UpperCAmelCase ) -> str:
"""simple docstring"""
lowerCamelCase__ : Dict = args.log_outputs
lowerCamelCase__ : Optional[Any] = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
lowerCamelCase__ : Tuple = load_metric('''wer''' )
lowerCamelCase__ : Any = load_metric('''cer''' )
# compute metrics
lowerCamelCase__ : Optional[int] = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
lowerCamelCase__ : Tuple = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
lowerCamelCase__ : Any = f"WER: {wer_result}\nCER: {cer_result}"
print(UpperCAmelCase )
with open(f"{dataset_id}_eval_results.txt" , '''w''' ) as f:
f.write(UpperCAmelCase )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
lowerCamelCase__ : Optional[int] = f"log_{dataset_id}_predictions.txt"
lowerCamelCase__ : Union[str, Any] = f"log_{dataset_id}_targets.txt"
with open(UpperCAmelCase , '''w''' ) as p, open(UpperCAmelCase , '''w''' ) as t:
# mapping function to write output
def write_to_file(UpperCAmelCase , UpperCAmelCase ):
p.write(f"{i}" + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(f"{i}" + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(UpperCAmelCase , with_indices=UpperCAmelCase )
def _a ( UpperCAmelCase ) -> str:
"""simple docstring"""
lowerCamelCase__ : Optional[int] = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
lowerCamelCase__ : Dict = re.sub(UpperCAmelCase , '''''' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
lowerCamelCase__ : Optional[int] = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
lowerCamelCase__ : Union[str, Any] = ''' '''.join(text.split(UpperCAmelCase ) )
return text
def _a ( UpperCAmelCase ) -> Tuple:
"""simple docstring"""
# load dataset
lowerCamelCase__ : Optional[Any] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=UpperCAmelCase )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
lowerCamelCase__ : Dict = AutoFeatureExtractor.from_pretrained(args.model_id )
lowerCamelCase__ : Union[str, Any] = feature_extractor.sampling_rate
# resample audio
lowerCamelCase__ : Any = dataset.cast_column('''audio''' , Audio(sampling_rate=UpperCAmelCase ) )
# load eval pipeline
if args.device is None:
lowerCamelCase__ : Optional[Any] = 0 if torch.cuda.is_available() else -1
lowerCamelCase__ : str = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(UpperCAmelCase ):
lowerCamelCase__ : Any = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
lowerCamelCase__ : Tuple = prediction['''text''']
lowerCamelCase__ : int = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
lowerCamelCase__ : Tuple = dataset.map(UpperCAmelCase , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(UpperCAmelCase , UpperCAmelCase )
if __name__ == "__main__":
_A : Any = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
_A : List[Any] = parser.parse_args()
main(args)
| 265 |
from math import ceil, sqrt
def _a ( UpperCAmelCase = 1000000 ) -> int:
"""simple docstring"""
lowerCamelCase__ : Any = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
lowerCamelCase__ : List[Any] = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
lowerCamelCase__ : Union[str, Any] = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(F'''{solution() = }''')
| 265 | 1 |
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
__snake_case = logging.get_logger(__name__)
def a ( __a , __a , __a ) -> None:
'''simple docstring'''
UpperCamelCase__ :Dict = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(__a ) == len(__a ), f'''{len(__a )} != {len(__a )}'''
dest_layers.load_state_dict(layers_to_copy.state_dict() )
__snake_case = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
__snake_case = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def a ( __a , __a ) -> Any:
'''simple docstring'''
try:
UpperCamelCase__ :List[str] = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
f'''no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'''
f''' {n_student}''' )
return list(range(__a ) )
def a ( __a , __a ) -> List[int]:
'''simple docstring'''
if n_student > n_teacher:
raise ValueError(f'''Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}''' )
elif n_teacher == n_student:
return list(range(__a ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def a ( __a , __a = "student" , __a = None , __a = None , __a=False , __a=None , __a=None , **__a , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = '''encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'''
assert (e is not None) or (d is not None), _msg
if isinstance(__a , __a ):
AutoTokenizer.from_pretrained(__a ).save_pretrained(__a ) # purely for convenience
UpperCamelCase__ :Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(__a ).eval()
else:
assert isinstance(__a , __a ), f'''teacher must be a model or string got type {type(__a )}'''
UpperCamelCase__ :Optional[int] = teacher.config.to_diff_dict()
try:
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
UpperCamelCase__ :Dict = teacher_e
if d is None:
UpperCamelCase__ :Tuple = teacher_d
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} )
except AttributeError: # T5
if hasattr(teacher.config , '''num_encoder_layers''' ):
UpperCamelCase__ , UpperCamelCase__ :Dict = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
UpperCamelCase__ , UpperCamelCase__ :int = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
UpperCamelCase__ :List[str] = teacher_e
if d is None:
UpperCamelCase__ :Dict = teacher_d
if hasattr(teacher.config , '''num_encoder_layers''' ):
init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} )
else:
init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(__a )
# Copy weights
UpperCamelCase__ :Union[str, Any] = teacher.config_class(**__a )
UpperCamelCase__ :str = AutoModelForSeqaSeqLM.from_config(__a )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
UpperCamelCase__ :Tuple = student.load_state_dict(teacher.state_dict() , strict=__a )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
UpperCamelCase__ , UpperCamelCase__ :Any = list(range(__a ) ), list(range(__a ) )
logger.info(
f'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'''
f''' {save_path}''' )
student.save_pretrained(__a )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
UpperCamelCase__ :List[int] = pick_layers_to_copy(__a , __a )
if d_layers_to_copy is None:
UpperCamelCase__ :List[int] = pick_layers_to_copy(__a , __a )
try:
if hasattr(
__a , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __a )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __a )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __a )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __a )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , __a )
copy_layers(teacher.decoder.block , student.decoder.block , __a )
logger.info(
f'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' )
UpperCamelCase__ :Union[str, Any] = {
'''teacher_type''': teacher.config.model_type,
'''copied_encoder_layers''': e_layers_to_copy,
'''copied_decoder_layers''': d_layers_to_copy,
}
student.save_pretrained(__a )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers) | 97 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__snake_case = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def a ( __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = state_dict.pop(__a )
UpperCamelCase__ :int = val
def a ( __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Tuple = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCamelCase__ :Dict = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
UpperCamelCase__ :List[str] = value
else:
UpperCamelCase__ :Dict = value
return new_state_dict
def a ( __a ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = ''''''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCamelCase__ :str = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ :Any = in_proj_weight[:256, :]
UpperCamelCase__ :Tuple = in_proj_bias[:256]
UpperCamelCase__ :Optional[int] = in_proj_weight[256:512, :]
UpperCamelCase__ :Optional[Any] = in_proj_bias[256:512]
UpperCamelCase__ :Tuple = in_proj_weight[-256:, :]
UpperCamelCase__ :Dict = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
UpperCamelCase__ :List[str] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCamelCase__ :Optional[Any] = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ :Any = in_proj_weight[:256, :]
UpperCamelCase__ :Optional[int] = in_proj_bias[:256]
UpperCamelCase__ :Tuple = in_proj_weight[256:512, :]
UpperCamelCase__ :Dict = in_proj_bias[256:512]
UpperCamelCase__ :Any = in_proj_weight[-256:, :]
UpperCamelCase__ :Dict = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
UpperCamelCase__ :List[str] = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
UpperCamelCase__ :Any = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
UpperCamelCase__ :Optional[Any] = in_proj_weight_cross_attn[:256, :]
UpperCamelCase__ :Any = in_proj_bias_cross_attn[:256]
UpperCamelCase__ :Any = in_proj_weight_cross_attn[256:512, :]
UpperCamelCase__ :Dict = in_proj_bias_cross_attn[256:512]
UpperCamelCase__ :str = in_proj_weight_cross_attn[-256:, :]
UpperCamelCase__ :Tuple = in_proj_bias_cross_attn[-256:]
def a ( __a , __a ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :str = image.size
UpperCamelCase__ :Optional[Any] = max(__a , __a )
UpperCamelCase__ :List[Any] = 800 if '''detection''' in checkpoint_url else 1000
UpperCamelCase__ :Dict = target_max_size / current_max_size
UpperCamelCase__ :Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def a ( __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Any = F.to_tensor(__a )
UpperCamelCase__ :int = F.normalize(__a , mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] )
return image
@torch.no_grad()
def a ( __a , __a , __a ) -> Dict:
'''simple docstring'''
logger.info('''Converting model...''' )
# load original state dict
UpperCamelCase__ :Optional[Any] = torch.hub.load_state_dict_from_url(__a , map_location='''cpu''' )
# rename keys
for src, dest in rename_keys:
rename_key(__a , __a , __a )
UpperCamelCase__ :Any = rename_backbone_keys(__a )
# query, key and value matrices need special treatment
read_in_q_k_v(__a )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCamelCase__ :Dict = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
UpperCamelCase__ :Optional[Any] = state_dict.pop(__a )
UpperCamelCase__ :int = val
# create HuggingFace model and load state dict
UpperCamelCase__ :str = TableTransformerConfig(
backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
UpperCamelCase__ :List[str] = 15
UpperCamelCase__ :int = 2
UpperCamelCase__ :Tuple = {0: '''table''', 1: '''table rotated'''}
UpperCamelCase__ :int = idalabel
UpperCamelCase__ :Dict = {v: k for k, v in idalabel.items()}
else:
UpperCamelCase__ :int = 125
UpperCamelCase__ :List[str] = 6
UpperCamelCase__ :Optional[Any] = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
UpperCamelCase__ :Dict = idalabel
UpperCamelCase__ :Optional[Any] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ :List[Any] = DetrImageProcessor(
format='''coco_detection''' , max_size=800 if '''detection''' in checkpoint_url else 1000 )
UpperCamelCase__ :int = TableTransformerForObjectDetection(__a )
model.load_state_dict(__a )
model.eval()
# verify our conversion
UpperCamelCase__ :Dict = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
UpperCamelCase__ :Optional[Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=__a )
UpperCamelCase__ :Tuple = Image.open(__a ).convert('''RGB''' )
UpperCamelCase__ :int = normalize(resize(__a , __a ) ).unsqueeze(0 )
UpperCamelCase__ :Optional[int] = model(__a )
if "detection" in checkpoint_url:
UpperCamelCase__ :Dict = (1, 15, 3)
UpperCamelCase__ :List[Any] = torch.tensor(
[[-6.7_8_9_7, -1_6.9_9_8_5, 6.7_9_3_7], [-8.0_1_8_6, -2_2.2_1_9_2, 6.9_6_7_7], [-7.3_1_1_7, -2_1.0_7_0_8, 7.4_0_5_5]] )
UpperCamelCase__ :Tuple = torch.tensor([[0.4_8_6_7, 0.1_7_6_7, 0.6_7_3_2], [0.6_7_1_8, 0.4_4_7_9, 0.3_8_3_0], [0.4_7_1_6, 0.1_7_6_0, 0.6_3_6_4]] )
else:
UpperCamelCase__ :Optional[Any] = (1, 125, 7)
UpperCamelCase__ :Dict = torch.tensor(
[[-1_8.1_4_3_0, -8.3_2_1_4, 4.8_2_7_4], [-1_8.4_6_8_5, -7.1_3_6_1, -4.2_6_6_7], [-2_6.3_6_9_3, -9.3_4_2_9, -4.9_9_6_2]] )
UpperCamelCase__ :List[Any] = torch.tensor([[0.4_9_8_3, 0.5_5_9_5, 0.9_4_4_0], [0.4_9_1_6, 0.6_3_1_5, 0.5_9_5_4], [0.6_1_0_8, 0.8_6_3_7, 0.1_1_3_5]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , __a , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __a , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
image_processor.save_pretrained(__a )
if push_to_hub:
# Push model to HF hub
logger.info('''Pushing model to the hub...''' )
UpperCamelCase__ :Union[str, Any] = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(__a )
image_processor.push_to_hub(__a )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__snake_case = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 97 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
snake_case = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
UpperCamelCase_ : Optional[str] = field(
default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
UpperCamelCase_ : Optional[str] = field(
default=lowerCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCamelCase_ : Optional[str] = field(
default=lowerCAmelCase , metadata={'''help''': '''The column name of the images in the files.'''} )
UpperCamelCase_ : Optional[str] = field(default=lowerCAmelCase , metadata={'''help''': '''A folder containing the training data.'''} )
UpperCamelCase_ : Optional[str] = field(default=lowerCAmelCase , metadata={'''help''': '''A folder containing the validation data.'''} )
UpperCamelCase_ : Optional[float] = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
UpperCamelCase_ : Optional[int] = field(
default=lowerCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase_ : Optional[int] = field(
default=lowerCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if self.train_dir is not None:
SCREAMING_SNAKE_CASE : List[str] = self.train_dir
if self.validation_dir is not None:
SCREAMING_SNAKE_CASE : int = self.validation_dir
SCREAMING_SNAKE_CASE : Any = data_files if data_files else None
@dataclass
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
UpperCamelCase_ : str = field(
default=lowerCAmelCase , metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
} , )
UpperCamelCase_ : Optional[str] = field(
default=lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} )
UpperCamelCase_ : Optional[str] = field(
default=lowerCAmelCase , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
UpperCamelCase_ : Optional[str] = field(
default=lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
UpperCamelCase_ : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase_ : str = field(default=lowerCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} )
UpperCamelCase_ : bool = field(
default=lowerCAmelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
UpperCamelCase_ : float = field(
default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} )
UpperCamelCase_ : bool = field(
default=lowerCAmelCase , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} )
@dataclass
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : float = field(
default=1e-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = torch.stack([example["pixel_values"] for example in examples] )
return {"pixel_values": pixel_values}
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_mae" , lowercase , lowercase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : List[Any] = training_args.get_process_log_level()
logger.setLevel(lowercase )
transformers.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE : Tuple = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE : Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset.
SCREAMING_SNAKE_CASE : int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
SCREAMING_SNAKE_CASE : Optional[Any] = None if "validation" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowercase ) and data_args.train_val_split > 0.0:
SCREAMING_SNAKE_CASE : Dict = ds["train"].train_test_split(data_args.train_val_split )
SCREAMING_SNAKE_CASE : int = split["train"]
SCREAMING_SNAKE_CASE : List[str] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : int = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
SCREAMING_SNAKE_CASE : str = ViTMAEConfig.from_pretrained(model_args.config_name , **lowercase )
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE : Optional[Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **lowercase )
else:
SCREAMING_SNAKE_CASE : Any = ViTMAEConfig()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# adapt config
config.update(
{
"mask_ratio": model_args.mask_ratio,
"norm_pix_loss": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase )
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE : Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase )
else:
SCREAMING_SNAKE_CASE : int = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
SCREAMING_SNAKE_CASE : Tuple = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch" )
SCREAMING_SNAKE_CASE : Union[str, Any] = ViTMAEForPreTraining(lowercase )
if training_args.do_train:
SCREAMING_SNAKE_CASE : Union[str, Any] = ds["train"].column_names
else:
SCREAMING_SNAKE_CASE : str = ds["validation"].column_names
if data_args.image_column_name is not None:
SCREAMING_SNAKE_CASE : Tuple = data_args.image_column_name
elif "image" in column_names:
SCREAMING_SNAKE_CASE : List[Any] = "image"
elif "img" in column_names:
SCREAMING_SNAKE_CASE : Optional[Any] = "img"
else:
SCREAMING_SNAKE_CASE : Optional[Any] = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
SCREAMING_SNAKE_CASE : Any = image_processor.size["shortest_edge"]
else:
SCREAMING_SNAKE_CASE : str = (image_processor.size["height"], image_processor.size["width"])
SCREAMING_SNAKE_CASE : Union[str, Any] = Compose(
[
Lambda(lambda lowercase : img.convert("RGB" ) if img.mode != "RGB" else img ),
RandomResizedCrop(lowercase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(lowercase ):
SCREAMING_SNAKE_CASE : List[Any] = [transforms(lowercase ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE : Dict = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowercase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE : int = (
ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(lowercase )
# Compute absolute learning rate
SCREAMING_SNAKE_CASE : List[str] = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
SCREAMING_SNAKE_CASE : Tuple = Trainer(
model=lowercase , args=lowercase , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=lowercase , data_collator=lowercase , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE : Dict = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE : Optional[int] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE : int = last_checkpoint
SCREAMING_SNAKE_CASE : Optional[Any] = trainer.train(resume_from_checkpoint=lowercase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
SCREAMING_SNAKE_CASE : Tuple = trainer.evaluate()
trainer.log_metrics("eval" , lowercase )
trainer.save_metrics("eval" , lowercase )
# Write model card and (optionally) push to hub
SCREAMING_SNAKE_CASE : Optional[Any] = {
"tasks": "masked-auto-encoding",
"dataset": data_args.dataset_name,
"tags": ["masked-auto-encoding"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase )
else:
trainer.create_model_card(**lowercase )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 319 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
assert isinstance(lowercase , lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE : Optional[int] = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
if issubclass(lowercase , lowercase ):
SCREAMING_SNAKE_CASE : Optional[Any] = parquet_path
elif issubclass(lowercase , lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path]
SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache"
SCREAMING_SNAKE_CASE : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
def lowerCamelCase__ ( lowercase , lowercase , lowercase=("train",) ):
"""simple docstring"""
assert isinstance(lowercase , lowercase )
for split in splits:
SCREAMING_SNAKE_CASE : Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : Dict = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE : str = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader({"train": parquet_path} , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE : Any = {split: parquet_path}
else:
SCREAMING_SNAKE_CASE : Tuple = "train"
SCREAMING_SNAKE_CASE : int = {"train": parquet_path, "test": parquet_path}
SCREAMING_SNAKE_CASE : Dict = tmp_path / "cache"
SCREAMING_SNAKE_CASE : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : int = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" )
assert writer.write() > 0
SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / "foo.parquet" )
SCREAMING_SNAKE_CASE : List[Any] = pf.read()
assert dataset.data.table == output_table
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = str(shared_datadir / "test_image_rgb.jpg" )
SCREAMING_SNAKE_CASE : Union[str, Any] = {"image": [image_path]}
SCREAMING_SNAKE_CASE : Union[str, Any] = Features({"image": Image()} )
SCREAMING_SNAKE_CASE : int = Dataset.from_dict(lowercase , features=lowercase )
SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" )
assert writer.write() > 0
SCREAMING_SNAKE_CASE : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
SCREAMING_SNAKE_CASE : Any = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=lowercase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
assert get_writer_batch_size(lowercase ) == expected
| 319 | 1 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
A__ : Optional[Any] = logging.getLogger(__name__)
if __name__ == "__main__":
A__ : Dict = argparse.ArgumentParser(
description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
)
parser.add_argument(
'--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
)
parser.add_argument(
'--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
)
parser.add_argument('--vocab_size', default=3_05_22, type=int)
A__ : int = parser.parse_args()
logger.info(F"Loading data from {args.data_file}")
with open(args.data_file, 'rb') as fp:
A__ : List[str] = pickle.load(fp)
logger.info('Counting occurrences for MLM.')
A__ : Optional[Any] = Counter()
for tk_ids in data:
counter.update(tk_ids)
A__ : int = [0] * args.vocab_size
for k, v in counter.items():
A__ : Optional[int] = v
logger.info(F"Dump to {args.token_counts_dump}")
with open(args.token_counts_dump, 'wb') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 207 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 207 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
a_ = {'configuration_vit': ['VIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTConfig', 'ViTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['ViTFeatureExtractor']
a_ = ['ViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'VIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTForImageClassification',
'ViTForMaskedImageModeling',
'ViTModel',
'ViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TFViTForImageClassification',
'TFViTModel',
'TFViTPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'FlaxViTForImageClassification',
'FlaxViTModel',
'FlaxViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 358 | a_ = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
a_ = [{'type': 'code', 'content': INSTALL_CONTENT}]
a_ = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 50 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Optional[Any] ={'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE_: List[Any] ={
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class __A ( UpperCamelCase__ ):
a__ : int = VOCAB_FILES_NAMES
a__ : Dict = PRETRAINED_VOCAB_FILES_MAP
a__ : Any = ["""input_ids""", """attention_mask"""]
a__ : Any = None
def __init__(self : Optional[int] , __a : Optional[int]=None , __a : Union[str, Any]=None , __a : Dict=None , __a : List[Any]="<unk>" , __a : Union[str, Any]="<s>" , __a : Any="</s>" , __a : int="<pad>" , __a : str=False , __a : str=False , **__a : int , ):
super().__init__(
__a , __a , tokenizer_file=__a , unk_token=__a , bos_token=__a , eos_token=__a , pad_token=__a , add_prefix_space=__a , clean_up_tokenization_spaces=__a , **__a , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __a ) != add_prefix_space:
UpperCAmelCase_ = getattr(__a , pre_tok_state.pop("type" ) )
UpperCAmelCase_ = add_prefix_space
UpperCAmelCase_ = pre_tok_class(**__a )
UpperCAmelCase_ = add_prefix_space
def _lowercase (self : Tuple , *__a : Optional[Any] , **__a : str ):
UpperCAmelCase_ = kwargs.get("is_split_into_words" , __a )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
" pretokenized inputs." )
return super()._batch_encode_plus(*__a , **__a )
def _lowercase (self : Tuple , *__a : Tuple , **__a : int ):
UpperCAmelCase_ = kwargs.get("is_split_into_words" , __a )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
" pretokenized inputs." )
return super()._encode_plus(*__a , **__a )
def _lowercase (self : Optional[int] , __a : str , __a : Optional[str] = None ):
UpperCAmelCase_ = self._tokenizer.model.save(__a , name=__a )
return tuple(__a )
def _lowercase (self : Optional[int] , __a : "Conversation" ):
UpperCAmelCase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__a , add_special_tokens=__a ) + [self.eos_token_id] )
if len(__a ) > self.model_max_length:
UpperCAmelCase_ = input_ids[-self.model_max_length :]
return input_ids
| 1 | '''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __A ( UpperCamelCase__ ):
a__ : Optional[Any] = DistilBertTokenizer
a__ : Any = DistilBertTokenizerFast
a__ : str = True
@slow
def _lowercase (self : int ):
UpperCAmelCase_ = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
UpperCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 1 | 1 |
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
warnings.warn(
"Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
"be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead." , _UpperCamelCase , )
| 310 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
with open(lowercase_ ) as metadata_file:
_UpperCamelCase : Dict = json.load(lowercase_ )
_UpperCamelCase : str = LukeConfig(use_entity_aware_attention=lowercase_ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
_UpperCamelCase : str = torch.load(lowercase_ ,map_location="cpu" )["module"]
# Load the entity vocab file
_UpperCamelCase : Dict = load_original_entity_vocab(lowercase_ )
# add an entry for [MASK2]
_UpperCamelCase : Any = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_UpperCamelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCamelCase : Dict = AddedToken("<ent>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
_UpperCamelCase : Union[str, Any] = AddedToken("<ent2>" ,lstrip=lowercase_ ,rstrip=lowercase_ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"r" ) as f:
_UpperCamelCase : Tuple = json.load(lowercase_ )
_UpperCamelCase : Optional[int] = "MLukeTokenizer"
with open(os.path.join(lowercase_ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
with open(os.path.join(lowercase_ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase_ ,lowercase_ )
_UpperCamelCase : int = MLukeTokenizer.from_pretrained(lowercase_ )
# Initialize the embeddings of the special tokens
_UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(["@"] )[0]
_UpperCamelCase : str = tokenizer.convert_tokens_to_ids(["#"] )[0]
_UpperCamelCase : Union[str, Any] = state_dict["embeddings.word_embeddings.weight"]
_UpperCamelCase : Optional[Any] = word_emb[ent_init_index].unsqueeze(0 )
_UpperCamelCase : List[str] = word_emb[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_UpperCamelCase : Optional[Any] = state_dict[bias_name]
_UpperCamelCase : List[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
_UpperCamelCase : Tuple = decoder_bias[enta_init_index].unsqueeze(0 )
_UpperCamelCase : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCamelCase : Tuple = F'''encoder.layer.{layer_index}.attention.self.'''
_UpperCamelCase : List[Any] = state_dict[prefix + matrix_name]
_UpperCamelCase : str = state_dict[prefix + matrix_name]
_UpperCamelCase : Any = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCamelCase : Any = state_dict["entity_embeddings.entity_embeddings.weight"]
_UpperCamelCase : Tuple = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : int = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_UpperCamelCase : int = state_dict["entity_predictions.bias"]
_UpperCamelCase : Dict = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
_UpperCamelCase : List[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
_UpperCamelCase : str = LukeForMaskedLM(config=lowercase_ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
_UpperCamelCase : List[str] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
_UpperCamelCase : Union[str, Any] = state_dict[key]
else:
_UpperCamelCase : Dict = state_dict[key]
_UpperCamelCase, _UpperCamelCase : Optional[Any] = model.load_state_dict(lowercase_ ,strict=lowercase_ )
if set(lowercase_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(lowercase_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ ,task="entity_classification" )
_UpperCamelCase : Dict = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
_UpperCamelCase : Optional[Any] = (0, 9)
_UpperCamelCase : int = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : List[str] = model(**lowercase_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 33, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase : Tuple = torch.Size((1, 1, 768) )
_UpperCamelCase : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase_ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
_UpperCamelCase : List[Any] = MLukeTokenizer.from_pretrained(lowercase_ )
_UpperCamelCase : int = "Tokyo is the capital of <mask>."
_UpperCamelCase : List[Any] = (24, 30)
_UpperCamelCase : Any = tokenizer(lowercase_ ,entity_spans=[span] ,return_tensors="pt" )
_UpperCamelCase : Optional[Any] = model(**lowercase_ )
_UpperCamelCase : int = encoding["input_ids"][0].tolist()
_UpperCamelCase : List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
_UpperCamelCase : List[str] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase_ )
_UpperCamelCase : Union[str, Any] = outputs.entity_logits[0][0].argmax().item()
_UpperCamelCase : Tuple = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase_ ) )
model.save_pretrained(lowercase_ )
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : List[str] = ["[MASK]", "[PAD]", "[UNK]"]
_UpperCamelCase : Tuple = [json.loads(lowercase_ ) for line in open(lowercase_ )]
_UpperCamelCase : List[str] = {}
for entry in data:
_UpperCamelCase : Any = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_UpperCamelCase : Dict = entity_id
break
_UpperCamelCase : Dict = F'''{language}:{entity_name}'''
_UpperCamelCase : str = entity_id
return new_mapping
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
lowerCamelCase__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 310 | 1 |
"""simple docstring"""
def lowercase (snake_case__ : float , snake_case__ : list[float] ) -> float:
'''simple docstring'''
if discount_rate < 0:
raise ValueError("""Discount rate cannot be negative""" )
if not cash_flows:
raise ValueError("""Cash flows list cannot be empty""" )
lowerCAmelCase = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(snake_case__ ) )
return round(snake_case__ , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 155 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
a = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( _a ):
def __init__( self : Tuple , *lowerCAmelCase : Tuple , **lowerCAmelCase : str ):
warnings.warn(
"""The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use FlavaImageProcessor instead.""" , lowerCAmelCase , )
super().__init__(*lowerCAmelCase , **lowerCAmelCase )
| 155 | 1 |
"""simple docstring"""
__UpperCamelCase : Tuple = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__UpperCamelCase : Tuple = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__UpperCamelCase : Dict = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 309 |
"""simple docstring"""
import re
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
if len(re.findall('[ATCG]' , _UpperCAmelCase ) ) != len(_UpperCAmelCase ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 309 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
A : List[Any] = logging.get_logger(__name__)
A : int = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A : Dict = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
A : Any = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
A : Tuple = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
A : Optional[Any] = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2,
'facebook/dpr-ctx_encoder-multiset-base': 5_1_2,
}
A : Tuple = {
'facebook/dpr-question_encoder-single-nq-base': 5_1_2,
'facebook/dpr-question_encoder-multiset-base': 5_1_2,
}
A : List[Any] = {
'facebook/dpr-reader-single-nq-base': 5_1_2,
'facebook/dpr-reader-multiset-base': 5_1_2,
}
A : Optional[int] = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
A : str = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
A : Union[str, Any] = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class __A( a ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
snake_case_ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
snake_case_ = DPRContextEncoderTokenizer
class __A( a ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
snake_case_ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
snake_case_ = DPRQuestionEncoderTokenizer
A : Optional[Any] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
A : Optional[Any] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
A : Tuple = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(a )
class __A:
def __call__( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = False , _snake_case = False , _snake_case = None , _snake_case = None , _snake_case = None , **_snake_case , ) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , return_tensors=_snake_case , return_attention_mask=_snake_case , **_snake_case , )
elif titles is None or texts is None:
__a = titles if texts is None else texts
return super().__call__(
_snake_case , _snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , return_tensors=_snake_case , return_attention_mask=_snake_case , **_snake_case , )
__a = titles if not isinstance(_snake_case , _snake_case ) else [titles]
__a = texts if not isinstance(_snake_case , _snake_case ) else [texts]
__a = len(_snake_case )
__a = questions if not isinstance(_snake_case , _snake_case ) else [questions] * n_passages
assert len(_snake_case ) == len(
_snake_case ), F"""There should be as many titles than texts but got {len(_snake_case )} titles and {len(_snake_case )} texts."""
__a = super().__call__(_snake_case , _snake_case , padding=_snake_case , truncation=_snake_case )['''input_ids''']
__a = super().__call__(_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case )['''input_ids''']
__a = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_snake_case , _snake_case )
]
}
if return_attention_mask is not False:
__a = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
__a = attention_mask
return self.pad(_snake_case , padding=_snake_case , max_length=_snake_case , return_tensors=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = 16 , _snake_case = 64 , _snake_case = 4 , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
__a = reader_input['''input_ids''']
__a , __a , __a = reader_output[:3]
__a = len(_snake_case )
__a = sorted(range(_snake_case ) , reverse=_snake_case , key=relevance_logits.__getitem__ )
__a = []
for doc_id in sorted_docs:
__a = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
__a = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
__a = sequence_ids.index(self.pad_token_id )
else:
__a = len(_snake_case )
__a = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_snake_case , top_spans=_snake_case , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_snake_case , start_index=_snake_case , end_index=_snake_case , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_snake_case ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
__a = []
for start_index, start_score in enumerate(_snake_case ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
__a = sorted(_snake_case , key=lambda _snake_case : x[1] , reverse=_snake_case )
__a = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]"""
__a = end_index - start_index + 1
assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}"""
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_snake_case ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a )
class __A( a , a ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = READER_PRETRAINED_VOCAB_FILES_MAP
snake_case_ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = READER_PRETRAINED_INIT_CONFIGURATION
snake_case_ = ['''input_ids''', '''attention_mask''']
snake_case_ = DPRReaderTokenizer | 6 |
"""simple docstring"""
from __future__ import annotations
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 61 | 0 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_A : Union[str, Any] = logging.getLogger(__name__)
_A : Optional[int] = 'Hello world! cécé herlolip'
_A : str = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def _a ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
"""simple docstring"""
lowerCamelCase__ : int = BertAbsConfig(
temp_dir='''.''' , finetune_bert=lowercase_ , large=lowercase_ , share_emb=lowercase_ , use_bert_emb=lowercase_ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
lowerCamelCase__ : List[Any] = torch.load(lowercase_ , lambda UpperCAmelCase , UpperCAmelCase : storage )
lowerCamelCase__ : Union[str, Any] = AbsSummarizer(lowercase_ , torch.device('''cpu''' ) , lowercase_ )
original.eval()
lowerCamelCase__ : Optional[int] = BertAbsSummarizer(lowercase_ , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outpus are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
lowerCamelCase__ : List[Any] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
lowerCamelCase__ : Tuple = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(lowercase_ )) )
lowerCamelCase__ : Dict = torch.tensor(lowercase_ ).unsqueeze(0 )
lowerCamelCase__ : Dict = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(lowercase_ )) )
lowerCamelCase__ : int = torch.tensor(lowercase_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
lowerCamelCase__ : str = encoder_input_ids
lowerCamelCase__ : Tuple = decoder_input_ids
lowerCamelCase__ : List[str] = None
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : str = None
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Tuple = None
# The original model does not apply the geneator layer immediatly but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
lowerCamelCase__ : Dict = original(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )[0]
lowerCamelCase__ : Any = original.generator(lowercase_ )
lowerCamelCase__ : Optional[Any] = new_model(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )[0]
lowerCamelCase__ : Union[str, Any] = new_model.generator(lowercase_ )
lowerCamelCase__ : str = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print('''Maximum absolute difference beween weights: {:.2f}'''.format(lowercase_ ) )
lowerCamelCase__ : Optional[int] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print('''Maximum absolute difference beween weights: {:.2f}'''.format(lowercase_ ) )
lowerCamelCase__ : int = torch.allclose(lowercase_ , lowercase_ , atol=1E-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
_A : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_A : Tuple = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 362 |
def _a ( UpperCAmelCase ) -> int:
"""simple docstring"""
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
raise TypeError('''only integers accepted as input''' )
else:
lowerCamelCase__ : Any = str(abs(UpperCAmelCase ) )
lowerCamelCase__ : Union[str, Any] = [list(UpperCAmelCase ) for char in range(len(UpperCAmelCase ) )]
for index in range(len(UpperCAmelCase ) ):
num_transpositions[index].pop(UpperCAmelCase )
return max(
int(''''''.join(list(UpperCAmelCase ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('doctest').testmod()
| 265 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = '▁'
lowerCAmelCase_ = {'vocab_file': 'spiece.model'}
lowerCAmelCase_ = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
lowerCAmelCase_ = {
'google/pegasus-xsum': 5_12,
}
lowerCAmelCase_ = logging.get_logger(__name__)
class _A ( _lowerCamelCase ):
_UpperCamelCase : Optional[int] = VOCAB_FILES_NAMES
_UpperCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
_UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[int] = ['''input_ids''', '''attention_mask''']
def __init__( self : List[Any] , _A : Union[str, Any] , _A : Optional[int]="<pad>" , _A : Tuple="</s>" , _A : str="<unk>" , _A : List[Any]="<mask_2>" , _A : Optional[int]="<mask_1>" , _A : Dict=None , _A : List[Any]=103 , _A : Optional[Dict[str, Any]] = None , **_A : Optional[int] , ) -> None:
"""simple docstring"""
lowercase : List[Any] = offset
if additional_special_tokens is not None:
if not isinstance(_A , _A ):
raise TypeError(
f"""additional_special_tokens should be of type {type(_A )}, but is"""
f""" {type(_A )}""" )
lowercase : Optional[int] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"""<unk_{i}>""" for i in range(len(_A ) , self.offset - 1 )
]
if len(set(_A ) ) != len(_A ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
lowercase : str = additional_special_tokens_extended
else:
lowercase : List[Any] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )]
lowercase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_A , unk_token=_A , mask_token=_A , pad_token=_A , mask_token_sent=_A , offset=_A , additional_special_tokens=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
lowercase : Any = mask_token_sent
lowercase : Optional[int] = vocab_file
lowercase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
# add special tokens to encoder dict
lowercase : Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
lowercase : Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
def __a ( self : Optional[Any] ) -> int:
"""simple docstring"""
return len(self.sp_model ) + self.offset
def __a ( self : Optional[int] ) -> Dict[str, int]:
"""simple docstring"""
lowercase : int = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ) -> Dict:
"""simple docstring"""
lowercase : str = self.__dict__.copy()
lowercase : str = None
return state
def __setstate__( self : Optional[Any] , _A : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase : List[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase : Any = {}
lowercase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __a ( self : List[str] , _A : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_A , out_type=_A )
def __a ( self : int , _A : str ) -> int:
"""simple docstring"""
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
lowercase : str = self.sp_model.piece_to_id(_A )
return sp_id + self.offset
def __a ( self : Any , _A : int ) -> str:
"""simple docstring"""
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
lowercase : List[str] = self.sp_model.IdToPiece(index - self.offset )
return token
def __a ( self : List[str] , _A : Tuple ) -> str:
"""simple docstring"""
lowercase : str = []
lowercase : int = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_A ) + token
lowercase : Tuple = []
else:
current_sub_tokens.append(_A )
out_string += self.sp_model.decode(_A )
return out_string.strip()
def __a ( self : int , _A : List[Any]=False ) -> Optional[Any]:
"""simple docstring"""
return 1
def __a ( self : Union[str, Any] , _A : Dict ) -> List[Any]:
"""simple docstring"""
lowercase : Any = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def __a ( self : Any , _A : List , _A : Optional[List] = None , _A : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(_A )
elif token_ids_a is None:
return self._special_token_mask(_A ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def __a ( self : Optional[Any] , _A : Union[str, Any] , _A : Any=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __a ( self : List[Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase : Optional[Any] = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _A )
elif not os.path.isfile(self.vocab_file ):
with open(_A , '''wb''' ) as fi:
lowercase : List[str] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,) | 308 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase_ = logging.get_logger(__name__)
class _A ( _lowerCamelCase ):
_UpperCamelCase : Dict = ['''input_features''']
def __init__( self : int , _A : int=80 , _A : Union[str, Any]=16_000 , _A : Union[str, Any]=160 , _A : Any=30 , _A : str=400 , _A : Union[str, Any]=0.0 , _A : Tuple=False , **_A : List[str] , ) -> int:
"""simple docstring"""
super().__init__(
feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , )
lowercase : Optional[Any] = n_fft
lowercase : Optional[int] = hop_length
lowercase : Optional[int] = chunk_length
lowercase : Union[str, Any] = chunk_length * sampling_rate
lowercase : Optional[Any] = self.n_samples // hop_length
lowercase : Optional[Any] = sampling_rate
lowercase : Union[str, Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_A , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , )
def __a ( self : Dict , _A : np.array ) -> np.ndarray:
"""simple docstring"""
lowercase : List[str] = spectrogram(
_A , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
lowercase : Union[str, Any] = log_spec[:, :-1]
lowercase : Optional[Any] = np.maximum(_A , log_spec.max() - 8.0 )
lowercase : str = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __a ( _A : List[np.ndarray] , _A : List[np.ndarray] , _A : float = 0.0 ) -> List[np.ndarray]:
"""simple docstring"""
if attention_mask is not None:
lowercase : Optional[Any] = np.array(_A , np.intaa )
lowercase : List[str] = []
for vector, length in zip(_A , attention_mask.sum(-1 ) ):
lowercase : Optional[int] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
lowercase : int = padding_value
normed_input_values.append(_A )
else:
lowercase : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Union[str, Any] , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : bool = True , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[bool] = None , _A : Optional[str] = "max_length" , _A : Optional[int] = None , _A : Optional[int] = None , _A : Optional[bool] = None , **_A : int , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase : Union[str, Any] = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
lowercase : Optional[Any] = is_batched_numpy or (
isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase : List[str] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_A , np.ndarray ):
lowercase : List[Any] = np.asarray(_A , dtype=np.floataa )
elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase : List[str] = [np.asarray([raw_speech] ).T]
lowercase : Tuple = BatchFeature({'''input_features''': raw_speech} )
# convert into correct format for padding
lowercase : str = self.pad(
_A , padding=_A , max_length=max_length if max_length else self.n_samples , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowercase : Tuple = self.zero_mean_unit_var_norm(
padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
lowercase : str = np.stack(padded_inputs['''input_features'''] , axis=0 )
# make sure list is in array format
lowercase : List[str] = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
lowercase : str = [self._np_extract_fbank_features(_A ) for waveform in input_features[0]]
if isinstance(input_features[0] , _A ):
lowercase : int = [np.asarray(_A , dtype=np.floataa ) for feature in input_features]
else:
lowercase : Optional[int] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowercase : List[str] = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
lowercase : Any = padded_inputs.convert_to_tensors(_A )
return padded_inputs
def __a ( self : Optional[Any] ) -> Dict[str, Any]:
"""simple docstring"""
lowercase : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase : Dict = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output | 308 | 1 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
snake_case_ = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
snake_case_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
A_ : Optional[Any] = 'maskformer'
A_ : Optional[int] = {'hidden_size': 'mask_feature_size'}
A_ : int = ['resnet', 'swin']
A_ : Any = ['detr']
def __init__(self : Union[str, Any] , a__ : int = 256 , a__ : int = 256 , a__ : float = 0.1 , a__ : bool = False , a__ : Optional[Dict] = None , a__ : Optional[Dict] = None , a__ : float = 0.0_2 , a__ : float = 1.0 , a__ : float = 1.0 , a__ : float = 1.0 , a__ : float = 2_0.0 , a__ : Optional[bool] = None , **a__ : Union[str, Any] , ):
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
__snake_case = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__snake_case = backbone_config.pop('''model_type''' )
__snake_case = CONFIG_MAPPING[backbone_model_type]
__snake_case = config_class.from_dict(_UpperCAmelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
f"""Supported model types: {','.join(self.backbones_supported )}""" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
__snake_case = DetrConfig()
else:
# verify that the decoder is supported
__snake_case = (
decoder_config.pop('''model_type''' ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f"""Transformer Decoder {decoder_type} not supported, please use one of"""
f""" {','.join(self.decoders_supported )}""" )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__snake_case = CONFIG_MAPPING[decoder_type]
__snake_case = config_class.from_dict(_UpperCAmelCase )
__snake_case = backbone_config
__snake_case = decoder_config
# main feature dimension for the model
__snake_case = fpn_feature_size
__snake_case = mask_feature_size
# initializer
__snake_case = init_std
__snake_case = init_xavier_std
# Hungarian matcher && loss
__snake_case = cross_entropy_weight
__snake_case = dice_weight
__snake_case = mask_weight
__snake_case = use_auxiliary_loss
__snake_case = no_object_weight
__snake_case = output_auxiliary_logits
__snake_case = self.decoder_config.encoder_attention_heads
__snake_case = self.decoder_config.num_hidden_layers
super().__init__(**_UpperCAmelCase )
@classmethod
def a (cls : Optional[Any] , a__ : PretrainedConfig , a__ : PretrainedConfig , **a__ : Optional[int] ):
"""simple docstring"""
return cls(
backbone_config=_UpperCAmelCase , decoder_config=_UpperCAmelCase , **_UpperCAmelCase , )
def a (self : int ):
"""simple docstring"""
__snake_case = copy.deepcopy(self.__dict__ )
__snake_case = self.backbone_config.to_dict()
__snake_case = self.decoder_config.to_dict()
__snake_case = self.__class__.model_type
return output
| 369 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : Union[str, Any] = (DPMSolverSinglestepScheduler,)
A_ : Union[str, Any] = (('num_inference_steps', 25),)
def a (self : Dict , **a__ : Tuple ):
"""simple docstring"""
__snake_case = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf''' ),
'''variance_type''': None,
}
config.update(**a__ )
return config
def a (self : str , a__ : Any=0 , **a__ : Tuple ):
"""simple docstring"""
__snake_case = dict(self.forward_default_kwargs )
__snake_case = kwargs.pop('''num_inference_steps''' , a__ )
__snake_case = self.dummy_sample
__snake_case = 0.1 * sample
__snake_case = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
__snake_case = self.get_scheduler_config(**a__ )
__snake_case = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# copy over dummy past residuals
__snake_case = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__ )
__snake_case = scheduler_class.from_pretrained(a__ )
new_scheduler.set_timesteps(a__ )
# copy over dummy past residuals
__snake_case = dummy_past_residuals[: new_scheduler.config.solver_order]
__snake_case , __snake_case = sample, sample
for t in range(a__ , time_step + scheduler.config.solver_order + 1 ):
__snake_case = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
__snake_case = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def a (self : Union[str, Any] ):
"""simple docstring"""
pass
def a (self : List[Any] , a__ : Dict=0 , **a__ : List[str] ):
"""simple docstring"""
__snake_case = dict(self.forward_default_kwargs )
__snake_case = kwargs.pop('''num_inference_steps''' , a__ )
__snake_case = self.dummy_sample
__snake_case = 0.1 * sample
__snake_case = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# copy over dummy past residuals (must be after setting timesteps)
__snake_case = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__ )
__snake_case = scheduler_class.from_pretrained(a__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a__ )
# copy over dummy past residual (must be after setting timesteps)
__snake_case = dummy_past_residuals[: new_scheduler.config.solver_order]
__snake_case = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
__snake_case = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def a (self : int , a__ : Tuple=None , **a__ : List[str] ):
"""simple docstring"""
if scheduler is None:
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config(**a__ )
__snake_case = scheduler_class(**a__ )
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config(**a__ )
__snake_case = scheduler_class(**a__ )
__snake_case = 10
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter
scheduler.set_timesteps(a__ )
for i, t in enumerate(scheduler.timesteps ):
__snake_case = model(a__ , a__ )
__snake_case = scheduler.step(a__ , a__ , a__ ).prev_sample
return sample
def a (self : str ):
"""simple docstring"""
__snake_case = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__snake_case = 50
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter
scheduler.set_timesteps(a__ )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
__snake_case = model(a__ , a__ )
__snake_case = scheduler.step(a__ , a__ , a__ ).prev_sample
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_5_7_4 ) < 1E-3
def a (self : int ):
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=a__ )
def a (self : List[str] ):
"""simple docstring"""
__snake_case = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__snake_case = self.full_loop(scheduler=a__ )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3
__snake_case = DEISMultistepScheduler.from_config(scheduler.config )
__snake_case = DPMSolverMultistepScheduler.from_config(scheduler.config )
__snake_case = UniPCMultistepScheduler.from_config(scheduler.config )
__snake_case = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__snake_case = self.full_loop(scheduler=a__ )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3
def a (self : List[str] ):
"""simple docstring"""
self.check_over_configs(thresholding=a__ )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=a__ , prediction_type=a__ , sample_max_value=a__ , algorithm_type='''dpmsolver++''' , solver_order=a__ , solver_type=a__ , )
def a (self : Union[str, Any] ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a__ )
def a (self : Union[str, Any] ):
"""simple docstring"""
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=a__ , solver_type=a__ , prediction_type=a__ , algorithm_type=a__ , )
__snake_case = self.full_loop(
solver_order=a__ , solver_type=a__ , prediction_type=a__ , algorithm_type=a__ , )
assert not torch.isnan(a__ ).any(), "Samples have nan numbers"
def a (self : List[str] ):
"""simple docstring"""
self.check_over_configs(lower_order_final=a__ )
self.check_over_configs(lower_order_final=a__ )
def a (self : Optional[Any] ):
"""simple docstring"""
self.check_over_configs(lambda_min_clipped=-float('''inf''' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def a (self : Tuple ):
"""simple docstring"""
self.check_over_configs(variance_type=a__ )
self.check_over_configs(variance_type='''learned_range''' )
def a (self : int ):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=a__ , time_step=0 )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.full_loop()
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3
def a (self : int ):
"""simple docstring"""
__snake_case = self.full_loop(use_karras_sigmas=a__ )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.2_2_4_8 ) < 1E-3
def a (self : Tuple ):
"""simple docstring"""
__snake_case = self.full_loop(prediction_type='''v_prediction''' )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.1_4_5_3 ) < 1E-3
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=a__ )
__snake_case = torch.mean(torch.abs(a__ ) )
assert abs(result_mean.item() - 0.0_6_4_9 ) < 1E-3
def a (self : int ):
"""simple docstring"""
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config(thresholding=a__ , dynamic_thresholding_ratio=0 )
__snake_case = scheduler_class(**a__ )
__snake_case = 10
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter.half()
scheduler.set_timesteps(a__ )
for i, t in enumerate(scheduler.timesteps ):
__snake_case = model(a__ , a__ )
__snake_case = scheduler.step(a__ , a__ , a__ ).prev_sample
assert sample.dtype == torch.floataa
| 238 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = ShapEImgaImgPipeline
UpperCamelCase = ['''image''']
UpperCamelCase = ['''image''']
UpperCamelCase = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
UpperCamelCase = False
@property
def snake_case_( self ) -> Optional[Any]:
return 32
@property
def snake_case_( self ) -> Union[str, Any]:
return 32
@property
def snake_case_( self ) -> int:
return self.time_input_dim * 4
@property
def snake_case_( self ) -> Tuple:
return 8
@property
def snake_case_( self ) -> List[Any]:
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
_SCREAMING_SNAKE_CASE = CLIPVisionModel(A )
return model
@property
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = CLIPImageProcessor(
crop_size=224 , do_center_crop=A , do_normalize=A , do_resize=A , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , )
return image_processor
@property
def snake_case_( self ) -> Optional[int]:
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
_SCREAMING_SNAKE_CASE = PriorTransformer(**A )
return model
@property
def snake_case_( self ) -> Any:
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
_SCREAMING_SNAKE_CASE = ShapERenderer(**A )
return model
def snake_case_( self ) -> Any:
_SCREAMING_SNAKE_CASE = self.dummy_prior
_SCREAMING_SNAKE_CASE = self.dummy_image_encoder
_SCREAMING_SNAKE_CASE = self.dummy_image_processor
_SCREAMING_SNAKE_CASE = self.dummy_renderer
_SCREAMING_SNAKE_CASE = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=A , clip_sample=A , clip_sample_range=1.0 , )
_SCREAMING_SNAKE_CASE = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def snake_case_( self , A , A=0 ) -> Tuple:
_SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 64, 64) , rng=random.Random(A ) ).to(A )
if str(A ).startswith("""mps""" ):
_SCREAMING_SNAKE_CASE = torch.manual_seed(A )
else:
_SCREAMING_SNAKE_CASE = torch.Generator(device=A ).manual_seed(A )
_SCREAMING_SNAKE_CASE = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def snake_case_( self ) -> Any:
_SCREAMING_SNAKE_CASE = """cpu"""
_SCREAMING_SNAKE_CASE = self.get_dummy_components()
_SCREAMING_SNAKE_CASE = self.pipeline_class(**A )
_SCREAMING_SNAKE_CASE = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_SCREAMING_SNAKE_CASE = pipe(**self.get_dummy_inputs(A ) )
_SCREAMING_SNAKE_CASE = output.images[0]
_SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_SCREAMING_SNAKE_CASE = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_( self ) -> List[Any]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def snake_case_( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = torch_device == """cpu"""
_SCREAMING_SNAKE_CASE = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=A , relax_max_difference=A , )
def snake_case_( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = self.get_dummy_components()
_SCREAMING_SNAKE_CASE = self.pipeline_class(**A )
_SCREAMING_SNAKE_CASE = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = self.get_dummy_inputs(A )
for key in inputs.keys():
if key in self.batch_params:
_SCREAMING_SNAKE_CASE = batch_size * [inputs[key]]
_SCREAMING_SNAKE_CASE = pipe(**A , num_images_per_prompt=A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case_( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
_SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_img2img_out.npy""" )
_SCREAMING_SNAKE_CASE = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
_SCREAMING_SNAKE_CASE = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_SCREAMING_SNAKE_CASE = torch.Generator(device=A ).manual_seed(0 )
_SCREAMING_SNAKE_CASE = pipe(
A , generator=A , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(A , A )
| 58 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
lowercase_ = logging.getLogger(__name__)
lowercase_ = """Hello world! cécé herlolip"""
lowercase_ = namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def lowerCamelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] ) ->List[Any]:
_SCREAMING_SNAKE_CASE = BertAbsConfig(
temp_dir=""".""" , finetune_bert=__lowerCamelCase , large=__lowerCamelCase , share_emb=__lowerCamelCase , use_bert_emb=__lowerCamelCase , encoder="""bert""" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
_SCREAMING_SNAKE_CASE = torch.load(__lowerCamelCase , lambda __lowerCamelCase , __lowerCamelCase : storage )
_SCREAMING_SNAKE_CASE = AbsSummarizer(__lowerCamelCase , torch.device("""cpu""" ) , __lowerCamelCase )
original.eval()
_SCREAMING_SNAKE_CASE = BertAbsSummarizer(__lowerCamelCase , torch.device("""cpu""" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("""convert the model""" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outpus are identical
# ----------------------------------
logging.info("""Make sure that the models' outputs are identical""" )
_SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("""bert-base-uncased""" )
# prepare the model inputs
_SCREAMING_SNAKE_CASE = tokenizer.encode("""This is sample éàalj'-.""" )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__lowerCamelCase )) )
_SCREAMING_SNAKE_CASE = torch.tensor(__lowerCamelCase ).unsqueeze(0 )
_SCREAMING_SNAKE_CASE = tokenizer.encode("""This is sample 3 éàalj'-.""" )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__lowerCamelCase )) )
_SCREAMING_SNAKE_CASE = torch.tensor(__lowerCamelCase ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
_SCREAMING_SNAKE_CASE = encoder_input_ids
_SCREAMING_SNAKE_CASE = decoder_input_ids
_SCREAMING_SNAKE_CASE = _SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = _SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = _SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
# The original model does not apply the geneator layer immediatly but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
_SCREAMING_SNAKE_CASE = original(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE = original.generator(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = new_model(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE = new_model.generator(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE = torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 )
if are_identical:
logging.info("""all weights are equal up to 1e-3""" )
else:
raise ValueError("""the weights are different. The new model is likely different from the original one.""" )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("""saving the model's state dictionary""" )
torch.save(
new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
lowercase_ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 58 | 1 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 305 | import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = np.array([[1, item, train_mtch[i]] for i, item in enumerate(__UpperCamelCase )] )
SCREAMING_SNAKE_CASE_ = np.array(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , __UpperCamelCase ) ) , x.transpose() ) , __UpperCamelCase )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = (1, 2, 1)
SCREAMING_SNAKE_CASE_ = (1, 1, 0, 7)
SCREAMING_SNAKE_CASE_ = SARIMAX(
__UpperCamelCase , exog=__UpperCamelCase , order=__UpperCamelCase , seasonal_order=__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = model.fit(disp=__UpperCamelCase , maxiter=6_0_0 , method="nm" )
SCREAMING_SNAKE_CASE_ = model_fit.predict(1 , len(__UpperCamelCase ) , exog=[test_match] )
return result[0]
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE_ = regressor.predict(__UpperCamelCase )
return y_pred[0]
def a__ ( __UpperCamelCase ):
train_user.sort()
SCREAMING_SNAKE_CASE_ = np.percentile(__UpperCamelCase , 2_5 )
SCREAMING_SNAKE_CASE_ = np.percentile(__UpperCamelCase , 7_5 )
SCREAMING_SNAKE_CASE_ = qa - qa
SCREAMING_SNAKE_CASE_ = qa - (iqr * 0.1)
return low_lim
def a__ ( __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
for i in list_vote:
if i > actual_result:
SCREAMING_SNAKE_CASE_ = not_safe + 1
else:
if abs(abs(__UpperCamelCase ) - abs(__UpperCamelCase ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
A : Dict = [[1_82_31, 0.0, 1], [2_26_21, 1.0, 2], [1_56_75, 0.0, 3], [2_35_83, 1.0, 4]]
A : Optional[Any] = pd.DataFrame(
data_input, columns=["total_user", "total_even", "days"]
)
A : Union[str, Any] = Normalizer().fit_transform(data_input_df.values)
# split data
A : Optional[int] = normalize_df[:, 2].tolist()
A : List[str] = normalize_df[:, 0].tolist()
A : int = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
A : int = normalize_df[:, [1, 2]].tolist()
A : Tuple = x[: len(x) - 1]
A : str = x[len(x) - 1 :]
# for linear regression & sarimax
A : Tuple = total_date[: len(total_date) - 1]
A : Optional[int] = total_user[: len(total_user) - 1]
A : str = total_match[: len(total_match) - 1]
A : List[Any] = total_date[len(total_date) - 1 :]
A : List[Any] = total_user[len(total_user) - 1 :]
A : Optional[Any] = total_match[len(total_match) - 1 :]
# voting system with forecasting
A : Optional[int] = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
A : str = "" if data_safety_checker(res_vote, tst_user) else "not "
print("Today's data is {not_str}safe.")
| 305 | 1 |
def UpperCAmelCase ( a_ , a_ ) -> int:
"""simple docstring"""
return 1 if input_a == input_a else 0
def UpperCAmelCase ( ) -> None:
"""simple docstring"""
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 15 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
SCREAMING_SNAKE_CASE_ = '''bart'''
SCREAMING_SNAKE_CASE_ = True
@st.cache(allow_output_mutation=_lowerCAmelCase )
def lowercase ():
if LOAD_DENSE_INDEX:
__lowerCAmelCase = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
__lowerCAmelCase = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
__lowerCAmelCase = qar_model.eval()
else:
__lowerCAmelCase , __lowerCAmelCase = (None, None)
if MODEL_TYPE == "bart":
__lowerCAmelCase = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
__lowerCAmelCase = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
__lowerCAmelCase = sas_model.eval()
else:
__lowerCAmelCase , __lowerCAmelCase = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowerCAmelCase )
def lowercase ():
if LOAD_DENSE_INDEX:
__lowerCAmelCase = faiss.StandardGpuResources()
__lowerCAmelCase = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
__lowerCAmelCase = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
__lowerCAmelCase = faiss.IndexFlatIP(128 )
__lowerCAmelCase = faiss.index_cpu_to_gpu(_lowerCAmelCase , 1 , _lowerCAmelCase )
wikiaab_gpu_index_flat.add(_lowerCAmelCase ) # TODO fix for larger GPU
else:
__lowerCAmelCase , __lowerCAmelCase = (None, None)
__lowerCAmelCase = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowerCAmelCase )
def lowercase ():
__lowerCAmelCase = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
__lowerCAmelCase = elia["""train_eli5"""]
__lowerCAmelCase = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
__lowerCAmelCase = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowerCAmelCase )
return (elia_train, eli5_train_q_index)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = load_indexes()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = load_models()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = load_train_data()
def lowercase (_lowerCAmelCase , _lowerCAmelCase=10 ):
__lowerCAmelCase = embed_questions_for_retrieval([question] , _lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase , __lowerCAmelCase = eli5_train_q_index.search(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = [elia_train[int(_lowerCAmelCase )] for i in I[0]]
return nn_examples
def lowercase (_lowerCAmelCase , _lowerCAmelCase="wiki40b" , _lowerCAmelCase="dense" , _lowerCAmelCase=10 ):
if source == "none":
__lowerCAmelCase , __lowerCAmelCase = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__lowerCAmelCase , __lowerCAmelCase = query_qa_dense_index(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
__lowerCAmelCase , __lowerCAmelCase = query_es_index(
_lowerCAmelCase , _lowerCAmelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCAmelCase , )
__lowerCAmelCase = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__lowerCAmelCase = """question: {} context: {}""".format(_lowerCAmelCase , _lowerCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCAmelCase : None),
} )
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=64 , _lowerCAmelCase=256 , _lowerCAmelCase=False , _lowerCAmelCase=2 , _lowerCAmelCase=0.95 , _lowerCAmelCase=0.8 ):
with torch.no_grad():
__lowerCAmelCase = qa_sas_generate(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_answers=1 , num_beams=_lowerCAmelCase , min_len=_lowerCAmelCase , max_len=_lowerCAmelCase , do_sample=_lowerCAmelCase , temp=_lowerCAmelCase , top_p=_lowerCAmelCase , top_k=_lowerCAmelCase , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
SCREAMING_SNAKE_CASE_ = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
SCREAMING_SNAKE_CASE_ = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
SCREAMING_SNAKE_CASE_ = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
SCREAMING_SNAKE_CASE_ = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Demo options''')
if demo_options:
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
SCREAMING_SNAKE_CASE_ = action_list.index(action_st)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
SCREAMING_SNAKE_CASE_ = show_type == '''Show full text of passages'''
else:
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
SCREAMING_SNAKE_CASE_ = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
SCREAMING_SNAKE_CASE_ = '''wiki40b'''
SCREAMING_SNAKE_CASE_ = '''dense'''
SCREAMING_SNAKE_CASE_ = '''beam'''
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 64
SCREAMING_SNAKE_CASE_ = 256
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox('''Generation options''')
if generate_options:
SCREAMING_SNAKE_CASE_ = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE_ = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = None
# start main text
SCREAMING_SNAKE_CASE_ = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
SCREAMING_SNAKE_CASE_ = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE_ = st.text_input('''Enter your question here:''', '''''')
else:
SCREAMING_SNAKE_CASE_ = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method='''dense''', n_results=10)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
SCREAMING_SNAKE_CASE_ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
SCREAMING_SNAKE_CASE_ = support_list[:10]
SCREAMING_SNAKE_CASE_ = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE_ = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
SCREAMING_SNAKE_CASE_ = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE_ = '''[{}]({})'''.format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE_ = sec_titles.split(''' & ''')
SCREAMING_SNAKE_CASE_ = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
SCREAMING_SNAKE_CASE_ = find_nearest_training(question)
SCREAMING_SNAKE_CASE_ = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
SCREAMING_SNAKE_CASE_ = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
SCREAMING_SNAKE_CASE_ = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 301 | 0 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
lowerCAmelCase__ : int =logging.getLogger(__name__)
class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase__ : Any = '''summarization'''
UpperCamelCase__ : Dict = ['''loss''']
UpperCamelCase__ : List[str] = ROUGE_KEYS
UpperCamelCase__ : List[str] = '''rouge2'''
def __init__( self , _A , **_A ):
'''simple docstring'''
if hparams.sortish_sampler and hparams.gpus > 1:
__SCREAMING_SNAKE_CASE = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' )
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' )
super().__init__(UpperCamelCase__ , num_labels=UpperCamelCase__ , mode=self.mode , **UpperCamelCase__ )
use_task_specific_params(self.model , 'summarization' )
save_git_info(self.hparams.output_dir )
__SCREAMING_SNAKE_CASE = Path(self.output_dir ) / '''metrics.json'''
__SCREAMING_SNAKE_CASE = Path(self.output_dir ) / '''hparams.pkl'''
pickle_save(self.hparams , self.hparams_save_path )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = defaultdict(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = self.config.model_type
__SCREAMING_SNAKE_CASE = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
__SCREAMING_SNAKE_CASE = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
__SCREAMING_SNAKE_CASE = {
'''train''': self.hparams.n_train,
'''val''': self.hparams.n_val,
'''test''': self.hparams.n_test,
}
__SCREAMING_SNAKE_CASE = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
__SCREAMING_SNAKE_CASE = {
'''train''': self.hparams.max_target_length,
'''val''': self.hparams.val_max_target_length,
'''test''': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f"""target_lens: {self.target_lens}"""
assert self.target_lens["train"] <= self.target_lens["test"], f"""target_lens: {self.target_lens}"""
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
__SCREAMING_SNAKE_CASE = get_git_info()['''repo_sha''']
__SCREAMING_SNAKE_CASE = hparams.num_workers
__SCREAMING_SNAKE_CASE = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
__SCREAMING_SNAKE_CASE = self.decoder_start_token_id
__SCREAMING_SNAKE_CASE = (
SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
__SCREAMING_SNAKE_CASE = self.hparams.eval_max_gen_length
else:
__SCREAMING_SNAKE_CASE = self.model.config.max_length
__SCREAMING_SNAKE_CASE = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {
k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
}
save_json(UpperCamelCase__ , Path(self.output_dir ) / 'text_batch.json' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' )
__SCREAMING_SNAKE_CASE = True
return readable_batch
def _A ( self , _A , **_A ):
'''simple docstring'''
return self.model(UpperCamelCase__ , **UpperCamelCase__ )
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
return lmap(str.strip , UpperCamelCase__ )
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer.pad_token_id
__SCREAMING_SNAKE_CASE = batch['''input_ids'''], batch['''attention_mask''']
__SCREAMING_SNAKE_CASE = batch['''labels''']
if isinstance(self.model , UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = self.model._shift_right(UpperCamelCase__ )
else:
__SCREAMING_SNAKE_CASE = shift_tokens_right(UpperCamelCase__ , UpperCamelCase__ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
__SCREAMING_SNAKE_CASE = decoder_input_ids
self.save_readable_batch(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = self(UpperCamelCase__ , attention_mask=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ , use_cache=UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = outputs['''logits''']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
__SCREAMING_SNAKE_CASE = nn.CrossEntropyLoss(ignore_index=UpperCamelCase__ )
assert lm_logits.shape[-1] == self.vocab_size
__SCREAMING_SNAKE_CASE = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
__SCREAMING_SNAKE_CASE = nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )
__SCREAMING_SNAKE_CASE = label_smoothed_nll_loss(
UpperCamelCase__ , UpperCamelCase__ , self.hparams.label_smoothing , ignore_index=UpperCamelCase__ )
return (loss,)
@property
def _A ( self ):
'''simple docstring'''
return self.tokenizer.pad_token_id
def _A ( self , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self._step(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = dict(zip(self.loss_names , UpperCamelCase__ ) )
# tokens per batch
__SCREAMING_SNAKE_CASE = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum()
__SCREAMING_SNAKE_CASE = batch['''input_ids'''].shape[0]
__SCREAMING_SNAKE_CASE = batch['''input_ids'''].eq(self.pad ).sum()
__SCREAMING_SNAKE_CASE = batch['''input_ids'''].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def _A ( self , _A , _A ):
'''simple docstring'''
return self._generative_step(UpperCamelCase__ )
def _A ( self , _A , _A="val" ):
'''simple docstring'''
self.step_count += 1
__SCREAMING_SNAKE_CASE = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
__SCREAMING_SNAKE_CASE = losses['''loss''']
__SCREAMING_SNAKE_CASE = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
__SCREAMING_SNAKE_CASE = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
__SCREAMING_SNAKE_CASE = torch.tensor(UpperCamelCase__ ).type_as(UpperCamelCase__ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = {f"""{prefix}_avg_{k}""": x for k, x in losses.items()}
__SCREAMING_SNAKE_CASE = self.step_count
self.metrics[prefix].append(UpperCamelCase__ ) # callback writes this to self.metrics_save_path
__SCREAMING_SNAKE_CASE = flatten_list([x['preds'] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f"""{prefix}_loss""": loss,
f"""{prefix}_{self.val_metric}""": metric_tensor,
}
def _A ( self , _A , _A ):
'''simple docstring'''
return calculate_rouge(UpperCamelCase__ , UpperCamelCase__ )
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
__SCREAMING_SNAKE_CASE = self.model.generate(
batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=UpperCamelCase__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
__SCREAMING_SNAKE_CASE = (time.time() - ta) / batch['''input_ids'''].shape[0]
__SCREAMING_SNAKE_CASE = self.ids_to_clean_text(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = self.ids_to_clean_text(batch['labels'] )
__SCREAMING_SNAKE_CASE = self._step(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = dict(zip(self.loss_names , UpperCamelCase__ ) )
__SCREAMING_SNAKE_CASE = self.calc_generative_metrics(UpperCamelCase__ , UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = np.mean(lmap(UpperCamelCase__ , UpperCamelCase__ ) )
base_metrics.update(gen_time=UpperCamelCase__ , gen_len=UpperCamelCase__ , preds=UpperCamelCase__ , target=UpperCamelCase__ , **UpperCamelCase__ )
return base_metrics
def _A ( self , _A , _A ):
'''simple docstring'''
return self._generative_step(UpperCamelCase__ )
def _A ( self , _A ):
'''simple docstring'''
return self.validation_epoch_end(UpperCamelCase__ , prefix='test' )
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.n_obs[type_path]
__SCREAMING_SNAKE_CASE = self.target_lens[type_path]
__SCREAMING_SNAKE_CASE = self.dataset_class(
self.tokenizer , type_path=UpperCamelCase__ , n_obs=UpperCamelCase__ , max_target_length=UpperCamelCase__ , **self.dataset_kwargs , )
return dataset
def _A ( self , _A , _A , _A = False ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_dataset(UpperCamelCase__ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
__SCREAMING_SNAKE_CASE = dataset.make_sortish_sampler(UpperCamelCase__ , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=dataset.collate_fn , shuffle=UpperCamelCase__ , num_workers=self.num_workers , sampler=UpperCamelCase__ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
__SCREAMING_SNAKE_CASE = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCamelCase__ , batch_sampler=UpperCamelCase__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=dataset.collate_fn , shuffle=UpperCamelCase__ , num_workers=self.num_workers , sampler=UpperCamelCase__ , )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=UpperCamelCase__ )
return dataloader
def _A ( self ):
'''simple docstring'''
return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )
def _A ( self ):
'''simple docstring'''
return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def _A ( _A , _A ):
'''simple docstring'''
BaseTransformer.add_model_specific_args(UpperCamelCase__ , UpperCamelCase__ )
add_generic_args(UpperCamelCase__ , UpperCamelCase__ )
parser.add_argument(
'--max_source_length' , default=1_024 , type=UpperCamelCase__ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--max_target_length' , default=56 , type=UpperCamelCase__ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--val_max_target_length' , default=142 , type=UpperCamelCase__ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--test_max_target_length' , default=142 , type=UpperCamelCase__ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument('--freeze_encoder' , action='store_true' )
parser.add_argument('--freeze_embeds' , action='store_true' )
parser.add_argument('--sortish_sampler' , action='store_true' , default=UpperCamelCase__ )
parser.add_argument('--overwrite_output_dir' , action='store_true' , default=UpperCamelCase__ )
parser.add_argument('--max_tokens_per_batch' , type=UpperCamelCase__ , default=UpperCamelCase__ )
parser.add_argument('--logger_name' , type=UpperCamelCase__ , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
parser.add_argument('--n_train' , type=UpperCamelCase__ , default=-1 , required=UpperCamelCase__ , help='# examples. -1 means use all.' )
parser.add_argument('--n_val' , type=UpperCamelCase__ , default=500 , required=UpperCamelCase__ , help='# examples. -1 means use all.' )
parser.add_argument('--n_test' , type=UpperCamelCase__ , default=-1 , required=UpperCamelCase__ , help='# examples. -1 means use all.' )
parser.add_argument(
'--task' , type=UpperCamelCase__ , default='summarization' , required=UpperCamelCase__ , help='# examples. -1 means use all.' )
parser.add_argument('--label_smoothing' , type=UpperCamelCase__ , default=0.0 , required=UpperCamelCase__ )
parser.add_argument('--src_lang' , type=UpperCamelCase__ , default='' , required=UpperCamelCase__ )
parser.add_argument('--tgt_lang' , type=UpperCamelCase__ , default='' , required=UpperCamelCase__ )
parser.add_argument('--eval_beams' , type=UpperCamelCase__ , default=UpperCamelCase__ , required=UpperCamelCase__ )
parser.add_argument(
'--val_metric' , type=UpperCamelCase__ , default=UpperCamelCase__ , required=UpperCamelCase__ , choices=['bleu', 'rouge2', 'loss', None] )
parser.add_argument('--eval_max_gen_length' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='never generate more than n tokens' )
parser.add_argument('--save_top_k' , type=UpperCamelCase__ , default=1 , required=UpperCamelCase__ , help='How many checkpoints to save' )
parser.add_argument(
'--early_stopping_patience' , type=UpperCamelCase__ , default=-1 , required=UpperCamelCase__ , help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
' val_check_interval will effect it.'
) , )
return parser
class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase__ : str = '''translation'''
UpperCamelCase__ : List[Any] = ['''loss''']
UpperCamelCase__ : List[str] = ['''bleu''']
UpperCamelCase__ : List[Any] = '''bleu'''
def __init__( self , _A , **_A ):
'''simple docstring'''
super().__init__(UpperCamelCase__ , **UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = hparams.src_lang
__SCREAMING_SNAKE_CASE = hparams.tgt_lang
def _A ( self , _A , _A ):
'''simple docstring'''
return calculate_bleu(UpperCamelCase__ , UpperCamelCase__ )
def __lowercase ( a__ , a__=None ) -> SummarizationModule:
Path(args.output_dir ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
check_output_dir(SCREAMING_SNAKE_CASE__ , expected_items=3 )
if model is None:
if "summarization" in args.task:
__SCREAMING_SNAKE_CASE = SummarizationModule(SCREAMING_SNAKE_CASE__ )
else:
__SCREAMING_SNAKE_CASE = TranslationModule(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('/tmp' )
or str(args.output_dir ).startswith('/var' )
):
__SCREAMING_SNAKE_CASE = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
__SCREAMING_SNAKE_CASE = os.environ.get('WANDB_PROJECT' , SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE = WandbLogger(name=model.output_dir.name , project=SCREAMING_SNAKE_CASE__ )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
__SCREAMING_SNAKE_CASE = WandbLogger(name=model.output_dir.name , project=f"""hf_{dataset}""" )
if args.early_stopping_patience >= 0:
__SCREAMING_SNAKE_CASE = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = args.val_metric == '''loss'''
__SCREAMING_SNAKE_CASE = generic_train(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , SCREAMING_SNAKE_CASE__ ) , early_stopping_callback=SCREAMING_SNAKE_CASE__ , logger=SCREAMING_SNAKE_CASE__ , )
pickle_save(model.hparams , model.output_dir / 'hparams.pkl' )
if not args.do_predict:
return model
__SCREAMING_SNAKE_CASE = ''''''
__SCREAMING_SNAKE_CASE = sorted(glob.glob(os.path.join(args.output_dir , '*.ckpt' ) , recursive=SCREAMING_SNAKE_CASE__ ) )
if checkpoints:
__SCREAMING_SNAKE_CASE = checkpoints[-1]
__SCREAMING_SNAKE_CASE = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
lowerCAmelCase__ : Tuple =argparse.ArgumentParser()
lowerCAmelCase__ : str =pl.Trainer.add_argparse_args(parser)
lowerCAmelCase__ : Union[str, Any] =SummarizationModule.add_model_specific_args(parser, os.getcwd())
lowerCAmelCase__ : List[Any] =parser.parse_args()
main(args)
| 367 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ : List[str] = CycleDiffusionPipeline
UpperCamelCase__ : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''negative_prompt''',
'''height''',
'''width''',
'''negative_prompt_embeds''',
}
UpperCamelCase__ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
UpperCamelCase__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} )
UpperCamelCase__ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _A ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
__SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , num_train_timesteps=1_000 , clip_sample=_A , set_alpha_to_one=_A , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
__SCREAMING_SNAKE_CASE = CLIPTextModel(_A )
__SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _A ( self , _A , _A=0 ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
__SCREAMING_SNAKE_CASE = image / 2 + 0.5
if str(_A ).startswith('mps' ):
__SCREAMING_SNAKE_CASE = torch.manual_seed(_A )
else:
__SCREAMING_SNAKE_CASE = torch.Generator(device=_A ).manual_seed(_A )
__SCREAMING_SNAKE_CASE = {
'prompt': 'An astronaut riding an elephant',
'source_prompt': 'An astronaut riding a horse',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'eta': 0.1,
'strength': 0.8,
'guidance_scale': 3,
'source_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = CycleDiffusionPipeline(**_A )
__SCREAMING_SNAKE_CASE = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(_A )
__SCREAMING_SNAKE_CASE = pipe(**_A )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE = np.array([0.4_4_5_9, 0.4_9_4_3, 0.4_5_4_4, 0.6_6_4_3, 0.5_4_7_4, 0.4_3_2_7, 0.5_7_0_1, 0.5_9_5_9, 0.5_1_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
for name, module in components.items():
if hasattr(_A , 'half' ):
__SCREAMING_SNAKE_CASE = module.half()
__SCREAMING_SNAKE_CASE = CycleDiffusionPipeline(**_A )
__SCREAMING_SNAKE_CASE = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(_A )
__SCREAMING_SNAKE_CASE = pipe(**_A )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE = np.array([0.3_5_0_6, 0.4_5_4_3, 0.4_4_6, 0.4_5_7_5, 0.5_1_9_5, 0.4_1_5_5, 0.5_2_7_3, 0.5_1_8, 0.4_1_1_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def _A ( self ):
'''simple docstring'''
return super().test_save_load_local()
@unittest.skip('non-deterministic pipeline' )
def _A ( self ):
'''simple docstring'''
return super().test_inference_batch_single_identical()
@skip_mps
def _A ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _A ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def _A ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _A ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
__SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy' )
__SCREAMING_SNAKE_CASE = init_image.resize((512, 512) )
__SCREAMING_SNAKE_CASE = 'CompVis/stable-diffusion-v1-4'
__SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained(_A , subfolder='scheduler' )
__SCREAMING_SNAKE_CASE = CycleDiffusionPipeline.from_pretrained(
_A , scheduler=_A , safety_checker=_A , torch_dtype=torch.floataa , revision='fp16' )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE = 'A black colored car'
__SCREAMING_SNAKE_CASE = 'A blue colored car'
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = pipe(
prompt=_A , source_prompt=_A , image=_A , num_inference_steps=100 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=_A , output_type='np' , )
__SCREAMING_SNAKE_CASE = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
__SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy' )
__SCREAMING_SNAKE_CASE = init_image.resize((512, 512) )
__SCREAMING_SNAKE_CASE = 'CompVis/stable-diffusion-v1-4'
__SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained(_A , subfolder='scheduler' )
__SCREAMING_SNAKE_CASE = CycleDiffusionPipeline.from_pretrained(_A , scheduler=_A , safety_checker=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE = 'A black colored car'
__SCREAMING_SNAKE_CASE = 'A blue colored car'
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = pipe(
prompt=_A , source_prompt=_A , image=_A , num_inference_steps=100 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=_A , output_type='np' , )
__SCREAMING_SNAKE_CASE = output.images
assert np.abs(image - expected_image ).max() < 2e-2
| 118 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCamelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
UpperCamelCase_ : Optional[str] = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The column name of the images in the files."""} )
UpperCamelCase_ : Optional[str] = field(default=UpperCAmelCase_ , metadata={"""help""": """A folder containing the training data."""} )
UpperCamelCase_ : Optional[str] = field(default=UpperCAmelCase_ , metadata={"""help""": """A folder containing the validation data."""} )
UpperCamelCase_ : Optional[float] = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
UpperCamelCase_ : Optional[int] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
UpperCamelCase_ : Optional[int] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def _snake_case ( self : Any ) -> Optional[Any]:
'''simple docstring'''
A: Union[str, Any] = {}
if self.train_dir is not None:
A: int = self.train_dir
if self.validation_dir is not None:
A: Any = self.validation_dir
A: List[Any] = data_files if data_files else None
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
UpperCamelCase_ : str = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
UpperCamelCase_ : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCamelCase_ : str = field(default=UpperCAmelCase_ , metadata={"""help""": """Name or path of preprocessor config."""} )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
UpperCamelCase_ : float = field(
default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : float = field(
default=1e-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def SCREAMING_SNAKE_CASE( __lowercase ) -> Union[str, Any]:
A: Any = torch.stack([example['''pixel_values'''] for example in examples] )
return {"pixel_values": pixel_values}
def SCREAMING_SNAKE_CASE( ) -> Dict:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A: int = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A , A , A: Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A , A , A: Optional[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mae''' , __lowercase , __lowercase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A: int = training_args.get_process_log_level()
logger.setLevel(__lowercase )
transformers.utils.logging.set_verbosity(__lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
A: int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A: Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
A: Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
A: Any = None if '''validation''' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __lowercase ) and data_args.train_val_split > 0.0:
A: int = ds['''train'''].train_test_split(data_args.train_val_split )
A: Optional[Any] = split['''train''']
A: List[str] = split['''test''']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A: Tuple = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
A: Any = ViTMAEConfig.from_pretrained(model_args.config_name , **__lowercase )
elif model_args.model_name_or_path:
A: Dict = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__lowercase )
else:
A: Optional[int] = ViTMAEConfig()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
A: Any = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__lowercase )
elif model_args.model_name_or_path:
A: Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__lowercase )
else:
A: List[str] = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
A: Any = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
A: Any = ViTMAEForPreTraining(__lowercase )
if training_args.do_train:
A: Optional[Any] = ds['''train'''].column_names
else:
A: Optional[int] = ds['''validation'''].column_names
if data_args.image_column_name is not None:
A: str = data_args.image_column_name
elif "image" in column_names:
A: List[Any] = '''image'''
elif "img" in column_names:
A: str = '''img'''
else:
A: Dict = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
A: str = image_processor.size['''shortest_edge''']
else:
A: Any = (image_processor.size['''height'''], image_processor.size['''width'''])
A: Optional[Any] = Compose(
[
Lambda(lambda __lowercase : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(__lowercase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(__lowercase ):
A: Tuple = [transforms(__lowercase ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
A: Tuple = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(__lowercase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
A: List[str] = (
ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(__lowercase )
# Compute absolute learning rate
A: str = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
A: List[str] = training_args.base_learning_rate * total_train_batch_size / 2_5_6
# Initialize our trainer
A: Tuple = Trainer(
model=__lowercase , args=__lowercase , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , )
# Training
if training_args.do_train:
A: Optional[Any] = None
if training_args.resume_from_checkpoint is not None:
A: List[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A: List[str] = last_checkpoint
A: Optional[Any] = trainer.train(resume_from_checkpoint=__lowercase )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
A: Optional[int] = trainer.evaluate()
trainer.log_metrics('''eval''' , __lowercase )
trainer.save_metrics('''eval''' , __lowercase )
# Write model card and (optionally) push to hub
A: List[Any] = {
'''tasks''': '''masked-auto-encoding''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-auto-encoding'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowercase )
else:
trainer.create_model_card(**__lowercase )
def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 319 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCamelCase = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[Any]:
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__lowercase )
def SCREAMING_SNAKE_CASE( __lowercase ) -> Tuple:
from transformers.testing_utils import pytest_terminal_summary_main
A: Optional[int] = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(__lowercase , id=__lowercase )
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Any:
# If no tests are collected, pytest exists with code 5, which makes the CI fail.
if exitstatus == 5:
A: Tuple = 0
# Doctest custom flag to ignore output.
UpperCamelCase = doctest.register_optionflag('''IGNORE_RESULT''')
UpperCamelCase = doctest.OutputChecker
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> str:
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase = CustomOutputChecker
UpperCamelCase = HfDoctestModule
UpperCamelCase = HfDocTestParser
| 319 | 1 |
"""simple docstring"""
def lowerCAmelCase__ ( ) -> List[str]:
"""simple docstring"""
for n in range(1 , 1_0_0_0_0_0_0 ):
yield n * (n + 1) // 2
def lowerCAmelCase__ ( _UpperCamelCase : List[str] ) -> List[str]:
"""simple docstring"""
snake_case = 1
snake_case = 2
while i * i <= n:
snake_case = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def lowerCAmelCase__ ( ) -> List[str]:
"""simple docstring"""
return next(i for i in triangle_number_generator() if count_divisors(_UpperCamelCase ) > 5_0_0 )
if __name__ == "__main__":
print(solution())
| 149 | """simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
SCREAMING_SNAKE_CASE__ = "true"
def lowerCAmelCase__ ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict=8_2 , _UpperCamelCase : int=1_6 ) -> str:
"""simple docstring"""
set_seed(4_2 )
snake_case = RegressionModel()
snake_case = deepcopy(_UpperCamelCase )
snake_case = RegressionDataset(length=_UpperCamelCase )
snake_case = DataLoader(_UpperCamelCase , batch_size=_UpperCamelCase )
model.to(accelerator.device )
snake_case ,snake_case = accelerator.prepare(_UpperCamelCase , _UpperCamelCase )
return model, ddp_model, dataloader
def lowerCAmelCase__ ( _UpperCamelCase : Accelerator , _UpperCamelCase : Optional[Any]=False ) -> List[str]:
"""simple docstring"""
snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
snake_case = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(_UpperCamelCase : Optional[Any] ):
snake_case = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCamelCase , max_length=_UpperCamelCase )
return outputs
with accelerator.main_process_first():
snake_case = dataset.map(
_UpperCamelCase , batched=_UpperCamelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
snake_case = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_UpperCamelCase : Optional[Any] ):
if use_longest:
return tokenizer.pad(_UpperCamelCase , padding='longest' , return_tensors='pt' )
return tokenizer.pad(_UpperCamelCase , padding='max_length' , max_length=1_2_8 , return_tensors='pt' )
return DataLoader(_UpperCamelCase , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=1_6 )
def lowerCAmelCase__ ( _UpperCamelCase : str , _UpperCamelCase : Any ) -> List[Any]:
"""simple docstring"""
snake_case = Accelerator(dispatch_batches=_UpperCamelCase , split_batches=_UpperCamelCase )
snake_case = get_dataloader(_UpperCamelCase , not dispatch_batches )
snake_case = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=_UpperCamelCase )
snake_case ,snake_case = accelerator.prepare(_UpperCamelCase , _UpperCamelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def lowerCAmelCase__ ( _UpperCamelCase : int , _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
snake_case = []
for batch in dataloader:
snake_case ,snake_case = batch.values()
with torch.no_grad():
snake_case = model(_UpperCamelCase )
snake_case ,snake_case = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
snake_case ,snake_case = [], []
for logit, targ in logits_and_targets:
logits.append(_UpperCamelCase )
targs.append(_UpperCamelCase )
snake_case ,snake_case = torch.cat(_UpperCamelCase ), torch.cat(_UpperCamelCase )
return logits, targs
def lowerCAmelCase__ ( _UpperCamelCase : Accelerator , _UpperCamelCase : Tuple=8_2 , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : int=False , _UpperCamelCase : List[str]=1_6 ) -> Optional[Any]:
"""simple docstring"""
snake_case ,snake_case ,snake_case = get_basic_setup(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
snake_case ,snake_case = generate_predictions(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
assert (
len(_UpperCamelCase ) == num_samples
), f"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_UpperCamelCase )}"""
def lowerCAmelCase__ ( _UpperCamelCase : bool = False , _UpperCamelCase : bool = False ) -> Tuple:
"""simple docstring"""
snake_case = evaluate.load('glue' , 'mrpc' )
snake_case ,snake_case = get_mrpc_setup(_UpperCamelCase , _UpperCamelCase )
# First do baseline
snake_case ,snake_case ,snake_case = setup['no']
model.to(_UpperCamelCase )
model.eval()
for batch in dataloader:
batch.to(_UpperCamelCase )
with torch.inference_mode():
snake_case = model(**_UpperCamelCase )
snake_case = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=_UpperCamelCase , references=batch['labels'] )
snake_case = metric.compute()
# Then do distributed
snake_case ,snake_case ,snake_case = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
snake_case = model(**_UpperCamelCase )
snake_case = outputs.logits.argmax(dim=-1 )
snake_case = batch['labels']
snake_case ,snake_case = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=_UpperCamelCase , references=_UpperCamelCase )
snake_case = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def lowerCAmelCase__ ( ) -> Tuple:
"""simple docstring"""
snake_case = Accelerator(split_batches=_UpperCamelCase , dispatch_batches=_UpperCamelCase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(_UpperCamelCase , _UpperCamelCase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
snake_case = Accelerator(split_batches=_UpperCamelCase , dispatch_batches=_UpperCamelCase )
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(_UpperCamelCase , 9_9 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
snake_case = Accelerator()
test_torch_metrics(_UpperCamelCase , 5_1_2 )
accelerator.state._reset_state()
def lowerCAmelCase__ ( _UpperCamelCase : Tuple ) -> str:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 149 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
A : Optional[Any] = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(a )
class __A( a ):
snake_case_ = '''rag'''
snake_case_ = True
def __init__( self , _snake_case=None , _snake_case=True , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case=" / " , _snake_case=" // " , _snake_case=5 , _snake_case=300 , _snake_case=768 , _snake_case=8 , _snake_case="wiki_dpr" , _snake_case="train" , _snake_case="compressed" , _snake_case=None , _snake_case=None , _snake_case=False , _snake_case=False , _snake_case=0.0 , _snake_case=True , _snake_case=False , _snake_case=False , _snake_case=False , _snake_case=True , _snake_case=None , **_snake_case , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
bos_token_id=_snake_case , pad_token_id=_snake_case , eos_token_id=_snake_case , decoder_start_token_id=_snake_case , forced_eos_token_id=_snake_case , is_encoder_decoder=_snake_case , prefix=_snake_case , vocab_size=_snake_case , **_snake_case , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
__a = kwargs.pop('''question_encoder''' )
__a = question_encoder_config.pop('''model_type''' )
__a = kwargs.pop('''generator''' )
__a = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
__a = AutoConfig.for_model(_snake_case , **_snake_case )
__a = AutoConfig.for_model(_snake_case , **_snake_case )
__a = reduce_loss
__a = label_smoothing
__a = exclude_bos_score
__a = do_marginalize
__a = title_sep
__a = doc_sep
__a = n_docs
__a = max_combined_length
__a = dataset
__a = dataset_split
__a = index_name
__a = retrieval_vector_size
__a = retrieval_batch_size
__a = passages_path
__a = index_path
__a = use_dummy_dataset
__a = output_retrieved
__a = do_deduplication
__a = use_cache
if self.forced_eos_token_id is None:
__a = getattr(self.generator , '''forced_eos_token_id''' , _snake_case )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , _snake_case , _snake_case , **_snake_case ) -> PretrainedConfig:
'''simple docstring'''
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = copy.deepcopy(self.__dict__ )
__a = self.question_encoder.to_dict()
__a = self.generator.to_dict()
__a = self.__class__.model_type
return output | 6 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase=() , _UpperCAmelCase=None , _UpperCAmelCase="no" , _UpperCAmelCase="29500" ) -> Tuple:
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Dict = False
if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
lowerCamelCase__ : Optional[Any] = True
elif "IPython" in sys.modules:
lowerCamelCase__ : Optional[Any] = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
lowerCamelCase__ : List[str] = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , _UpperCAmelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if num_processes is None:
lowerCamelCase__ : Optional[Any] = 8
lowerCamelCase__ : List[str] = PrepareForLaunch(_UpperCAmelCase , distributed_type='TPU' )
print(F"""Launching a training on {num_processes} TPU cores.""" )
xmp.spawn(_UpperCAmelCase , args=_UpperCAmelCase , nprocs=_UpperCAmelCase , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
function(*_UpperCAmelCase )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=_UpperCAmelCase , master_addr='127.0.01' , master_port=_UpperCAmelCase , mixed_precision=_UpperCAmelCase ):
lowerCamelCase__ : Optional[int] = PrepareForLaunch(_UpperCAmelCase , distributed_type='MULTI_GPU' )
print(F"""Launching training on {num_processes} GPUs.""" )
try:
start_processes(_UpperCAmelCase , args=_UpperCAmelCase , nprocs=_UpperCAmelCase , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
lowerCamelCase__ : int = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
function(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase=() , _UpperCAmelCase=2 ) -> Optional[Any]:
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=_UpperCAmelCase , master_addr='127.0.01' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
lowerCamelCase__ : Optional[Any] = PrepareForLaunch(_UpperCAmelCase , debug=_UpperCAmelCase )
start_processes(_UpperCAmelCase , args=_UpperCAmelCase , nprocs=_UpperCAmelCase , start_method='fork' )
| 50 | 0 |
from bisect import bisect
from itertools import accumulate
def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : str ) -> Any:
UpperCamelCase : Tuple = sorted(zip(snake_case__ , snake_case__ ) , key=lambda snake_case__ : x[0] / x[1] , reverse=snake_case__ )
UpperCamelCase , UpperCamelCase : str = [i[0] for i in r], [i[1] for i in r]
UpperCamelCase : Optional[Any] = list(accumulate(snake_case__ ) )
UpperCamelCase : Dict = bisect(snake_case__ , snake_case__ )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 103 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = JukeboxTokenizer
UpperCAmelCase__ : Optional[int] = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
def snake_case_ ( self ) -> Optional[Any]:
import torch
UpperCamelCase : Tuple = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' )
UpperCamelCase : List[str] = tokenizer(**self.metas )['input_ids']
# fmt: off
UpperCamelCase : Dict = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2] ) )
@require_torch
def snake_case_ ( self ) -> Optional[Any]:
import torch
UpperCamelCase : str = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' )
UpperCamelCase : Dict = tokenizer(**self.metas )['input_ids']
# fmt: off
UpperCamelCase : Optional[int] = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2] ) )
| 103 | 1 |
import warnings
from .generation import TFGenerationMixin
class __lowerCamelCase (_a ):
# warning at import time
warnings.warn(
"""Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will """
"""be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.""" , _a , )
| 310 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
__snake_case = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l='''
def _A ( _lowercase = "mumbai" ) -> Generator[tuple[str, str], None, None]:
"""simple docstring"""
__UpperCamelCase = BeautifulSoup(requests.get(url + location ).content , 'html.parser' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('div' , attrs={'data-tn-component': 'organicJob'} ):
__UpperCamelCase = job.find('a' , attrs={'data-tn-element': 'jobTitle'} ).text.strip()
__UpperCamelCase = job.find('span' , {'class': 'company'} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 310 | 1 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
__snake_case = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
__snake_case = {
"""vinai/phobert-base""": 2_56,
"""vinai/phobert-large""": 2_56,
}
def _lowercase ( UpperCamelCase_ ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = set()
SCREAMING_SNAKE_CASE__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE__ = char
SCREAMING_SNAKE_CASE__ = set(UpperCamelCase_ )
return pairs
class lowercase__ ( _UpperCAmelCase ):
A__ : str =VOCAB_FILES_NAMES
A__ : Tuple =PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any]="<s>" , UpperCAmelCase_ : List[Any]="</s>" , UpperCAmelCase_ : Union[str, Any]="</s>" , UpperCAmelCase_ : Dict="<s>" , UpperCAmelCase_ : int="<unk>" , UpperCAmelCase_ : List[str]="<pad>" , UpperCAmelCase_ : Optional[int]="<mask>" , **UpperCAmelCase_ : Tuple , ):
super().__init__(
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , **UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE__ = vocab_file
SCREAMING_SNAKE_CASE__ = merges_file
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 3
self.add_from_file(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.encoder.items()}
with open(UpperCAmelCase_ , encoding='utf-8' ) as merges_handle:
SCREAMING_SNAKE_CASE__ = merges_handle.read().split('\n' )[:-1]
SCREAMING_SNAKE_CASE__ = [tuple(merge.split()[:-1] ) for merge in merges]
SCREAMING_SNAKE_CASE__ = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
SCREAMING_SNAKE_CASE__ = {}
def A_ ( self : Tuple , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A_ ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_ )) + [1]
return [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1] + ([0] * len(UpperCAmelCase_ )) + [1]
def A_ ( self : Union[str, Any] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def A_ ( self : Optional[int] ):
return len(self.encoder )
def A_ ( self : str ):
return dict(self.encoder , **self.added_tokens_encoder )
def A_ ( self : Tuple , UpperCAmelCase_ : Tuple ):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE__ = tuple(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
SCREAMING_SNAKE_CASE__ = get_pairs(UpperCAmelCase_ )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE__ = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : self.bpe_ranks.get(UpperCAmelCase_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = bigram
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
while i < len(UpperCAmelCase_ ):
try:
SCREAMING_SNAKE_CASE__ = word.index(UpperCAmelCase_ , UpperCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE__ = j
if word[i] == first and i < len(UpperCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE__ = tuple(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = new_word
if len(UpperCAmelCase_ ) == 1:
break
else:
SCREAMING_SNAKE_CASE__ = get_pairs(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = '@@ '.join(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = word[:-4]
SCREAMING_SNAKE_CASE__ = word
return word
def A_ ( self : Dict , UpperCAmelCase_ : Optional[Any] ):
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = re.findall(r'\S+\n?' , UpperCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(UpperCAmelCase_ ).split(' ' ) ) )
return split_tokens
def A_ ( self : Union[str, Any] , UpperCAmelCase_ : List[Any] ):
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token ) )
def A_ ( self : Optional[Any] , UpperCAmelCase_ : List[Any] ):
return self.decoder.get(UpperCAmelCase_ , self.unk_token )
def A_ ( self : Dict , UpperCAmelCase_ : List[str] ):
SCREAMING_SNAKE_CASE__ = ' '.join(UpperCAmelCase_ ).replace('@@ ' , '' ).strip()
return out_string
def A_ ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ):
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE__ = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE__ = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.vocab_file , UpperCAmelCase_ )
if os.path.abspath(self.merges_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.merges_file , UpperCAmelCase_ )
return out_vocab_file, out_merge_file
def A_ ( self : Union[str, Any] , UpperCAmelCase_ : Any ):
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
try:
with open(UpperCAmelCase_ , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(UpperCAmelCase_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(F'Incorrect encoding detected in {f}, please rebuild the dataset' )
return
SCREAMING_SNAKE_CASE__ = f.readlines()
for lineTmp in lines:
SCREAMING_SNAKE_CASE__ = lineTmp.strip()
SCREAMING_SNAKE_CASE__ = line.rfind(' ' )
if idx == -1:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt>\'' )
SCREAMING_SNAKE_CASE__ = line[:idx]
SCREAMING_SNAKE_CASE__ = len(self.encoder )
| 169 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
__snake_case = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
'''simple docstring'''
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
if tokenizer_name is None:
SCREAMING_SNAKE_CASE__ = TOKENIZER_CLASSES
else:
SCREAMING_SNAKE_CASE__ = {tokenizer_name: getattr(UpperCamelCase_ , tokenizer_name + 'Fast' )}
logger.info(F'Loading tokenizer classes: {tokenizer_names}' )
for tokenizer_name in tokenizer_names:
SCREAMING_SNAKE_CASE__ = TOKENIZER_CLASSES[tokenizer_name]
SCREAMING_SNAKE_CASE__ = True
if checkpoint_name is None:
SCREAMING_SNAKE_CASE__ = list(tokenizer_class.max_model_input_sizes.keys() )
else:
SCREAMING_SNAKE_CASE__ = [checkpoint_name]
logger.info(F'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
for checkpoint in checkpoint_names:
logger.info(F'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
# Load tokenizer
SCREAMING_SNAKE_CASE__ = tokenizer_class.from_pretrained(UpperCamelCase_ , force_download=UpperCamelCase_ )
# Save fast tokenizer
logger.info(F'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
# For organization names we create sub-directories
if "/" in checkpoint:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = checkpoint.split('/' )
SCREAMING_SNAKE_CASE__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
elif add_prefix:
SCREAMING_SNAKE_CASE__ = checkpoint
SCREAMING_SNAKE_CASE__ = dump_path
else:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = dump_path
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
SCREAMING_SNAKE_CASE__ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
SCREAMING_SNAKE_CASE__ = file_path.split(UpperCamelCase_ )[-1][0]
if next_char == "/":
SCREAMING_SNAKE_CASE__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = None
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
SCREAMING_SNAKE_CASE__ = tokenizer.save_pretrained(
UpperCamelCase_ , legacy_format=UpperCamelCase_ , filename_prefix=UpperCamelCase_ )
logger.info(F'=> File names {file_names}' )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(UpperCamelCase_ )
logger.info(F'=> removing {file_name}' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
__snake_case = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 169 | 1 |
'''simple docstring'''
UpperCamelCase_ = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCamelCase_ = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCamelCase_ = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 309 |
'''simple docstring'''
def _UpperCAmelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : str ) -> list[int]:
_lowerCAmelCase : List[Any] = int(_lowerCamelCase )
# Initialize Result
_lowerCAmelCase : Any = []
# Traverse through all denomination
for denomination in reversed(_lowerCamelCase ):
# Find denominations
while int(_lowerCamelCase ) >= int(_lowerCamelCase ):
total_value -= int(_lowerCamelCase )
answer.append(_lowerCamelCase ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
UpperCamelCase_ = []
UpperCamelCase_ = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
UpperCamelCase_ = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'Denomination {i}: ').strip()))
UpperCamelCase_ = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCamelCase_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
UpperCamelCase_ = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'Following is minimal change for {value}: ')
UpperCamelCase_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 309 | 1 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : float | Decimal , _UpperCamelCase : float = 10**-10 ) -> float:
'''simple docstring'''
UpperCamelCase__ = a
while True:
UpperCamelCase__ = Decimal(_UpperCamelCase ) - (
Decimal(eval(_UpperCamelCase ) ) / Decimal(eval(str(diff(_UpperCamelCase ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(_UpperCamelCase ) ) < precision: # noqa: S307
return float(_UpperCamelCase )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}""")
# Find Square Root of 5
print(F"""The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}""")
# Exponential Roots
print(F"""The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}""") | 31 |
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.mean(1 )
# Centralize the data of class i
UpperCamelCase__ = data - column_reshape(_UpperCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_UpperCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = features.mean(1 )
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.shape[1]
UpperCamelCase__ = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
if features.any():
UpperCamelCase__ = features.mean(1 )
# Center the dataset
UpperCamelCase__ = features - np.reshape(_UpperCamelCase , (data_mean.size, 1) )
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T ) / features.shape[1]
UpperCamelCase__ , UpperCamelCase__ = np.linalg.eigh(_UpperCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
UpperCamelCase__ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
UpperCamelCase__ = np.dot(filtered_eigenvectors.T , _UpperCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
if features.any:
UpperCamelCase__ , UpperCamelCase__ = eigh(
covariance_between_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , covariance_within_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , )
UpperCamelCase__ = eigenvectors[:, ::-1][:, :dimensions]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = np.linalg.svd(_UpperCamelCase )
UpperCamelCase__ = svd_matrix[:, 0:dimensions]
UpperCamelCase__ = np.dot(filtered_svd_matrix.T , _UpperCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
UpperCamelCase__ = np.array([0, 0, 0, 1, 1] )
UpperCamelCase__ = 2
UpperCamelCase__ = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = linear_discriminant_analysis(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if isinstance(_UpperCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
UpperCamelCase__ = 2
UpperCamelCase__ = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = principal_component_analysis(_UpperCamelCase , _UpperCamelCase )
if not np.allclose(_UpperCamelCase , _UpperCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> Union[str, Any]:
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
__UpperCamelCase :str = [[1, 2, 4], [1, 2, 3, 4]]
__UpperCamelCase :Dict = DisjunctiveConstraint(__lowercase)
self.assertTrue(isinstance(dc.token_ids , __lowercase))
with self.assertRaises(__lowercase):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
with self.assertRaises(__lowercase):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
def UpperCamelCase__ ( self) -> List[Any]:
# We can't have constraints that are complete subsets of another. This leads to a preverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
__UpperCamelCase :Dict = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__lowercase):
DisjunctiveConstraint(__lowercase) # fails here
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :Union[str, Any] = [[1, 2, 3], [1, 2, 4]]
__UpperCamelCase :Any = DisjunctiveConstraint(__lowercase)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Dict = dc.update(1)
__UpperCamelCase :Union[str, Any] = stepped is True and completed is False and reset is False
self.assertTrue(__lowercase)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :List[Any] = dc.update(2)
__UpperCamelCase :Tuple = stepped is True and completed is False and reset is False
self.assertTrue(__lowercase)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Dict = dc.update(3)
__UpperCamelCase :List[Any] = stepped is True and completed is True and reset is False
self.assertTrue(__lowercase)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3])
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :Dict = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__UpperCamelCase :List[Any] = DisjunctiveConstraint(__lowercase)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :int = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :List[str] = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Union[str, Any] = dc.update(4)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2, 4])
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Union[str, Any] = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5])
dc.reset()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Any = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 3)
self.assertTrue(dc.current_seq == [1])
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Tuple = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 2)
self.assertTrue(dc.current_seq == [1, 2])
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :str = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.remaining() == 0)
self.assertTrue(dc.current_seq == [1, 2, 5])
| 43 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=64 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Optional[int]:
UpperCAmelCase : List[Any] = parent
UpperCAmelCase : Optional[int] = batch_size
UpperCAmelCase : Union[str, Any] = seq_length
UpperCAmelCase : Optional[Any] = is_training
UpperCAmelCase : Dict = use_input_mask
UpperCAmelCase : str = use_token_type_ids
UpperCAmelCase : List[Any] = use_labels
UpperCAmelCase : List[Any] = vocab_size
UpperCAmelCase : Dict = hidden_size
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : Optional[int] = num_attention_heads
UpperCAmelCase : int = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : List[str] = hidden_dropout_prob
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : str = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : List[str] = type_sequence_label_size
UpperCAmelCase : int = initializer_range
UpperCAmelCase : str = num_labels
UpperCAmelCase : Optional[int] = num_choices
UpperCAmelCase : Dict = scope
UpperCAmelCase : Union[str, Any] = vocab_size - 1
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Any = None
if self.use_input_mask:
UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : List[str] = None
if self.use_labels:
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Optional[int] = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowercase( self ) -> Optional[Any]:
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase : Any = True
return config, input_ids, input_mask, token_labels
def _lowercase( self , A , A , A ) -> int:
UpperCAmelCase : str = GPTNeoXModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[str] = model(A , attention_mask=A )
UpperCAmelCase : List[str] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A ) -> Optional[int]:
UpperCAmelCase : str = True
UpperCAmelCase : Optional[Any] = GPTNeoXModel(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A ) -> List[str]:
UpperCAmelCase : Tuple = GPTNeoXForCausalLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase : str = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A ) -> Tuple:
UpperCAmelCase : List[str] = self.num_labels
UpperCAmelCase : Any = GPTNeoXForQuestionAnswering(A )
model.to(A )
model.eval()
UpperCAmelCase : str = model(A , attention_mask=A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase( self , A , A , A , A ) -> int:
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : List[str] = GPTNeoXForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase( self , A , A , A , A ) -> str:
UpperCAmelCase : List[Any] = self.num_labels
UpperCAmelCase : Tuple = GPTNeoXForTokenClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase( self , A , A , A ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : str = GPTNeoXForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
UpperCAmelCase : List[str] = model(A , attention_mask=A , use_cache=A )
UpperCAmelCase : List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : Any = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase : Dict = model(A , attention_mask=A , output_hidden_states=A )
UpperCAmelCase : Any = output_from_no_past["""hidden_states"""][0]
UpperCAmelCase : List[str] = model(
A , attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0]
# select random slice
UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase : List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs
UpperCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : str = GPTNeoXModelTester(self )
UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=A , hidden_size=64 , num_attention_heads=8 )
def _lowercase( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(A , A , A )
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(A , A , A )
def _lowercase( self ) -> Optional[Any]:
# This regression test was failing with PyTorch < 1.3
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCAmelCase : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(A , A , A )
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(A , A , A )
def _lowercase( self ) -> int:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*A )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
def _lowercase( self ) -> Any:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def _lowercase( self ) -> Optional[int]:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowercase( self , A ) -> str:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : int = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Dict = GPTNeoXModel(A )
original_model.to(A )
original_model.eval()
UpperCAmelCase : List[str] = original_model(A ).last_hidden_state
UpperCAmelCase : Any = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Any = {"""type""": scaling_type, """factor""": 1_0.0}
UpperCAmelCase : str = GPTNeoXModel(A )
scaled_model.to(A )
scaled_model.eval()
UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state
UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
@slow
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : str = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
UpperCAmelCase : int = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(A )
UpperCAmelCase : List[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(A )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
UpperCAmelCase : List[str] = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
UpperCAmelCase : Union[str, Any] = model.generate(**A , do_sample=A , max_new_tokens=20 )
UpperCAmelCase : Tuple = tokenizer.batch_decode(A )[0]
self.assertEqual(A , A )
| 265 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Dict , *_UpperCAmelCase : Dict , **_UpperCAmelCase : Union[str, Any] ) -> str:
'''simple docstring'''
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def lowercase__ ( cls : int , *_UpperCAmelCase : Dict , **_UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def lowercase__ ( cls : Optional[int] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : List[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : List[Any] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def lowercase__ ( cls : List[Any] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def lowercase__ ( cls : str , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Dict ) -> Any:
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Any , *_UpperCAmelCase : int , **_UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def lowercase__ ( cls : Union[str, Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def lowercase__ ( cls : List[Any] , *_UpperCAmelCase : Dict , **_UpperCAmelCase : List[str] ) -> int:
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Union[str, Any] , *_UpperCAmelCase : str , **_UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def lowercase__ ( cls : Tuple , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Tuple ) -> int:
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def lowercase__ ( cls : Dict , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Dict , *_UpperCAmelCase : int , **_UpperCAmelCase : Union[str, Any] ) -> int:
'''simple docstring'''
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def lowercase__ ( cls : List[str] , *_UpperCAmelCase : Any , **_UpperCAmelCase : Any ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def lowercase__ ( cls : List[Any] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : List[Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : List[str] , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def lowercase__ ( cls : Any , *_UpperCAmelCase : int , **_UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def lowercase__ ( cls : List[Any] , *_UpperCAmelCase : int , **_UpperCAmelCase : List[Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch", "transformers", "onnx"] )
| 241 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 241 | 1 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
lowercase__ :Tuple = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
lowercase__ :Optional[Any] = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=lowerCAmelCase__ )[0]
@deprecated(lowerCAmelCase__ , '''Please use tf.data to implement this functionality.''' )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=lowerCAmelCase__ ) as bytestream:
lowercase = _readaa(lowerCAmelCase__ )
if magic != 2051:
raise ValueError(
'''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
lowercase = _readaa(lowerCAmelCase__ )
lowercase = _readaa(lowerCAmelCase__ )
lowercase = _readaa(lowerCAmelCase__ )
lowercase = bytestream.read(rows * cols * num_images )
lowercase = numpy.frombuffer(lowerCAmelCase__ , dtype=numpy.uinta )
lowercase = data.reshape(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , 1 )
return data
@deprecated(lowerCAmelCase__ , '''Please use tf.one_hot on tensors.''' )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = labels_dense.shape[0]
lowercase = numpy.arange(lowerCAmelCase__ ) * num_classes
lowercase = numpy.zeros((num_labels, num_classes) )
lowercase = 1
return labels_one_hot
@deprecated(lowerCAmelCase__ , '''Please use tf.data to implement this functionality.''' )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=False , lowerCAmelCase__=10 ):
'''simple docstring'''
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=lowerCAmelCase__ ) as bytestream:
lowercase = _readaa(lowerCAmelCase__ )
if magic != 2049:
raise ValueError(
'''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
lowercase = _readaa(lowerCAmelCase__ )
lowercase = bytestream.read(lowerCAmelCase__ )
lowercase = numpy.frombuffer(lowerCAmelCase__ , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(lowerCAmelCase__ , lowerCAmelCase__ )
return labels
class lowercase :
@deprecated(
A__ ,'''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' ,)
def __init__( self ,A__ ,A__ ,A__=False ,A__=False ,A__=dtypes.floataa ,A__=True ,A__=None ,):
lowercase , lowercase = random_seed.get_seed(A__)
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda)
lowercase = dtypes.as_dtype(A__).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype)
if fake_data:
lowercase = 1_0_0_0_0
lowercase = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'images.shape: {images.shape} labels.shape: {labels.shape}'
lowercase = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
lowercase = images.reshape(
images.shape[0] ,images.shape[1] * images.shape[2])
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
lowercase = images.astype(numpy.floataa)
lowercase = numpy.multiply(A__ ,1.0 / 255.0)
lowercase = images
lowercase = labels
lowercase = 0
lowercase = 0
@property
def A__ ( self):
return self._images
@property
def A__ ( self):
return self._labels
@property
def A__ ( self):
return self._num_examples
@property
def A__ ( self):
return self._epochs_completed
def A__ ( self ,A__ ,A__=False ,A__=True):
if fake_data:
lowercase = [1] * 7_8_4
lowercase = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(A__)],
[fake_label for _ in range(A__)],
)
lowercase = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
lowercase = numpy.arange(self._num_examples)
numpy.random.shuffle(A__)
lowercase = self.images[perma]
lowercase = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
lowercase = self._num_examples - start
lowercase = self._images[start : self._num_examples]
lowercase = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
lowercase = numpy.arange(self._num_examples)
numpy.random.shuffle(A__)
lowercase = self.images[perm]
lowercase = self.labels[perm]
# Start next epoch
lowercase = 0
lowercase = batch_size - rest_num_examples
lowercase = self._index_in_epoch
lowercase = self._images[start:end]
lowercase = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) ,axis=0),
numpy.concatenate((labels_rest_part, labels_new_part) ,axis=0),
)
else:
self._index_in_epoch += batch_size
lowercase = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(lowerCAmelCase__ , '''Please write your own downloading logic.''' )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
if not gfile.Exists(lowerCAmelCase__ ):
gfile.MakeDirs(lowerCAmelCase__ )
lowercase = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
if not gfile.Exists(lowerCAmelCase__ ):
urllib.request.urlretrieve(lowerCAmelCase__ , lowerCAmelCase__ ) # noqa: S310
with gfile.GFile(lowerCAmelCase__ ) as f:
lowercase = f.size()
print('''Successfully downloaded''' , lowerCAmelCase__ , lowerCAmelCase__ , '''bytes.''' )
return filepath
@deprecated(
lowerCAmelCase__ , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=dtypes.floataa , lowerCAmelCase__=True , lowerCAmelCase__=5000 , lowerCAmelCase__=None , lowerCAmelCase__=DEFAULT_SOURCE_URL , ):
'''simple docstring'''
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=lowerCAmelCase__ , one_hot=lowerCAmelCase__ , dtype=lowerCAmelCase__ , seed=lowerCAmelCase__ )
lowercase = fake()
lowercase = fake()
lowercase = fake()
return _Datasets(train=lowerCAmelCase__ , validation=lowerCAmelCase__ , test=lowerCAmelCase__ )
if not source_url: # empty string check
lowercase = DEFAULT_SOURCE_URL
lowercase = '''train-images-idx3-ubyte.gz'''
lowercase = '''train-labels-idx1-ubyte.gz'''
lowercase = '''t10k-images-idx3-ubyte.gz'''
lowercase = '''t10k-labels-idx1-ubyte.gz'''
lowercase = _maybe_download(
lowerCAmelCase__ , lowerCAmelCase__ , source_url + train_images_file )
with gfile.Open(lowerCAmelCase__ , '''rb''' ) as f:
lowercase = _extract_images(lowerCAmelCase__ )
lowercase = _maybe_download(
lowerCAmelCase__ , lowerCAmelCase__ , source_url + train_labels_file )
with gfile.Open(lowerCAmelCase__ , '''rb''' ) as f:
lowercase = _extract_labels(lowerCAmelCase__ , one_hot=lowerCAmelCase__ )
lowercase = _maybe_download(
lowerCAmelCase__ , lowerCAmelCase__ , source_url + test_images_file )
with gfile.Open(lowerCAmelCase__ , '''rb''' ) as f:
lowercase = _extract_images(lowerCAmelCase__ )
lowercase = _maybe_download(
lowerCAmelCase__ , lowerCAmelCase__ , source_url + test_labels_file )
with gfile.Open(lowerCAmelCase__ , '''rb''' ) as f:
lowercase = _extract_labels(lowerCAmelCase__ , one_hot=lowerCAmelCase__ )
if not 0 <= validation_size <= len(lowerCAmelCase__ ):
lowercase = (
'''Validation size should be between 0 and '''
f'{len(lowerCAmelCase__ )}. Received: {validation_size}.'
)
raise ValueError(lowerCAmelCase__ )
lowercase = train_images[:validation_size]
lowercase = train_labels[:validation_size]
lowercase = train_images[validation_size:]
lowercase = train_labels[validation_size:]
lowercase = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
lowercase = _DataSet(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
lowercase = _DataSet(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
lowercase = _DataSet(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
return _Datasets(train=lowerCAmelCase__ , validation=lowerCAmelCase__ , test=lowerCAmelCase__ )
| 101 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : Optional[int] = "▁"
_lowercase : Optional[Any] = {"vocab_file": "spiece.model"}
_lowercase : Optional[Any] = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
_lowercase : Tuple = {
"google/pegasus-xsum": 5_1_2,
}
_lowercase : Optional[int] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = VOCAB_FILES_NAMES
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ['input_ids', 'attention_mask']
def __init__( self : Any, lowerCamelCase : Optional[Any], lowerCamelCase : Any="<pad>", lowerCamelCase : Optional[Any]="</s>", lowerCamelCase : Any="<unk>", lowerCamelCase : Tuple="<mask_2>", lowerCamelCase : int="<mask_1>", lowerCamelCase : Optional[Any]=None, lowerCamelCase : Dict=103, lowerCamelCase : Optional[Dict[str, Any]] = None, **lowerCamelCase : Optional[int], )-> None:
lowerCamelCase__ : Union[str, Any] =offset
if additional_special_tokens is not None:
if not isinstance(lowerCamelCase, lowerCamelCase ):
raise TypeError(
F'''additional_special_tokens should be of type {type(lowerCamelCase )}, but is'''
F''' {type(lowerCamelCase )}''' )
lowerCamelCase__ : Any =(
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'''<unk_{i}>''' for i in range(len(lowerCamelCase ), self.offset - 1 )
]
if len(set(lowerCamelCase ) ) != len(lowerCamelCase ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
F''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
lowerCamelCase__ : Optional[Any] =additional_special_tokens_extended
else:
lowerCamelCase__ : Tuple =[mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'''<unk_{i}>''' for i in range(2, self.offset )]
lowerCamelCase__ : Tuple ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowerCamelCase, unk_token=lowerCamelCase, mask_token=lowerCamelCase, pad_token=lowerCamelCase, mask_token_sent=lowerCamelCase, offset=lowerCamelCase, additional_special_tokens=lowerCamelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCamelCase, )
lowerCamelCase__ : Optional[int] =mask_token_sent
lowerCamelCase__ : Optional[Any] =vocab_file
lowerCamelCase__ : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase )
# add special tokens to encoder dict
lowerCamelCase__ : Dict[int, str] ={
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1 )} )
lowerCamelCase__ : Dict[str, int] ={v: k for k, v in self.encoder.items()}
@property
def snake_case ( self : Union[str, Any] )-> int:
return len(self.sp_model ) + self.offset
def snake_case ( self : Optional[Any] )-> Dict[str, int]:
lowerCamelCase__ : List[Any] ={self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str )-> List[Any]:
lowerCamelCase__ : Optional[Any] =self.__dict__.copy()
lowerCamelCase__ : Optional[int] =None
return state
def __setstate__( self : Dict, lowerCamelCase : int )-> Optional[Any]:
lowerCamelCase__ : Optional[int] =d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
lowerCamelCase__ : str ={}
lowerCamelCase__ : Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case ( self : Any, lowerCamelCase : str )-> List[str]:
return self.sp_model.encode(lowerCamelCase, out_type=lowerCamelCase )
def snake_case ( self : int, lowerCamelCase : str )-> int:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
lowerCamelCase__ : Any =self.sp_model.piece_to_id(lowerCamelCase )
return sp_id + self.offset
def snake_case ( self : Tuple, lowerCamelCase : int )-> str:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
lowerCamelCase__ : Any =self.sp_model.IdToPiece(index - self.offset )
return token
def snake_case ( self : List[Any], lowerCamelCase : Optional[int] )-> Any:
lowerCamelCase__ : Optional[int] =[]
lowerCamelCase__ : Tuple =''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCamelCase ) + token
lowerCamelCase__ : str =[]
else:
current_sub_tokens.append(lowerCamelCase )
out_string += self.sp_model.decode(lowerCamelCase )
return out_string.strip()
def snake_case ( self : Union[str, Any], lowerCamelCase : Union[str, Any]=False )-> List[str]:
return 1
def snake_case ( self : Tuple, lowerCamelCase : Optional[int] )-> Tuple:
lowerCamelCase__ : Tuple =set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def snake_case ( self : Any, lowerCamelCase : List, lowerCamelCase : Optional[List] = None, lowerCamelCase : bool = False )-> List[int]:
if already_has_special_tokens:
return self._special_token_mask(lowerCamelCase )
elif token_ids_a is None:
return self._special_token_mask(lowerCamelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def snake_case ( self : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Optional[int]=None )-> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def snake_case ( self : Optional[int], lowerCamelCase : str, lowerCamelCase : Optional[str] = None )-> Tuple[str]:
if not os.path.isdir(lowerCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ : List[str] =os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase, '''wb''' ) as fi:
lowerCamelCase__ : int =self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
return (out_vocab_file,)
| 238 | 0 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_lowercase : List[str] ="bart"
_lowercase : Any =True
@st.cache(allow_output_mutation=_lowercase)
def lowerCAmelCase_ ( ) -> str:
"""simple docstring"""
if LOAD_DENSE_INDEX:
a__ : str = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""")
a__ : Any = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""").to("""cuda:0""")
a__ : List[str] = qar_model.eval()
else:
a__ , a__ : Dict = (None, None)
if MODEL_TYPE == "bart":
a__ : Any = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""")
a__ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""").to("""cuda:0""")
a__ : List[str] = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""")
sas_model.load_state_dict(save_dict["""model"""])
a__ : Optional[int] = sas_model.eval()
else:
a__ , a__ : Tuple = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""")
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowercase)
def lowerCAmelCase_ ( ) -> int:
"""simple docstring"""
if LOAD_DENSE_INDEX:
a__ : Optional[Any] = faiss.StandardGpuResources()
a__ : List[Any] = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""")["""train"""]
a__ : int = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
a__ : Optional[int] = faiss.IndexFlatIP(128)
a__ : Dict = faiss.index_cpu_to_gpu(_lowercase , 1 , _lowercase)
wikiaab_gpu_index_flat.add(_lowercase) # TODO fix for larger GPU
else:
a__ , a__ : int = (None, None)
a__ : Optional[int] = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}])
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowercase)
def lowerCAmelCase_ ( ) -> Tuple:
"""simple docstring"""
a__ : str = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""")
a__ : Tuple = elia["""train_eli5"""]
a__ : Dict = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128))
a__ : int = faiss.IndexFlatIP(128)
eli5_train_q_index.add(_lowercase)
return (elia_train, eli5_train_q_index)
_lowercase , _lowercase , _lowercase : Dict =load_indexes()
_lowercase , _lowercase , _lowercase , _lowercase : Optional[Any] =load_models()
_lowercase , _lowercase : Optional[Any] =load_train_data()
def lowerCAmelCase_ ( _lowercase : Tuple , _lowercase : Dict=10) -> List[str]:
"""simple docstring"""
a__ : Union[str, Any] = embed_questions_for_retrieval([question] , _lowercase , _lowercase)
a__ , a__ : int = eli5_train_q_index.search(_lowercase , _lowercase)
a__ : int = [elia_train[int(_lowercase)] for i in I[0]]
return nn_examples
def lowerCAmelCase_ ( _lowercase : int , _lowercase : Dict="wiki40b" , _lowercase : List[str]="dense" , _lowercase : List[str]=10) -> str:
"""simple docstring"""
if source == "none":
a__ , a__ : Tuple = (""" <P> """.join(["""""" for _ in range(11)]).strip(), [])
else:
if method == "dense":
a__ , a__ : Optional[int] = query_qa_dense_index(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase)
else:
a__ , a__ : Dict = query_es_index(
_lowercase , _lowercase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowercase , )
a__ : Any = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
a__ : Optional[Any] = """question: {} context: {}""".format(_lowercase , _lowercase)
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowercase: None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowercase: None),
})
def lowerCAmelCase_ ( _lowercase : Tuple , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int]=64 , _lowercase : List[Any]=256 , _lowercase : int=False , _lowercase : Optional[int]=2 , _lowercase : Optional[int]=0.95 , _lowercase : Dict=0.8) -> Tuple:
"""simple docstring"""
with torch.no_grad():
a__ : Dict = qa_sas_generate(
_lowercase , _lowercase , _lowercase , num_answers=1 , num_beams=_lowercase , min_len=_lowercase , max_len=_lowercase , do_sample=_lowercase , temp=_lowercase , top_p=_lowercase , top_k=_lowercase , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
_lowercase : Dict ="<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
_lowercase : Tuple ="\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_lowercase : List[str] ="\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
_lowercase : List[Any] =[
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
_lowercase : str =st.sidebar.checkbox("Demo options")
if demo_options:
_lowercase : Any =st.sidebar.selectbox(
"",
action_list,
index=3,
)
_lowercase : Union[str, Any] =action_list.index(action_st)
_lowercase : int =st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
_lowercase : Dict =show_type == "Show full text of passages"
else:
_lowercase : int =3
_lowercase : Any =True
_lowercase : List[str] =st.sidebar.checkbox("Retrieval options")
if retrieval_options:
_lowercase : Dict ="\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
_lowercase : Optional[int] =st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
_lowercase : List[Any] =st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
_lowercase : Union[str, Any] ="wiki40b"
_lowercase : Tuple ="dense"
_lowercase : Dict ="beam"
_lowercase : Dict =2
_lowercase : Dict =64
_lowercase : List[str] =256
_lowercase : Any =None
_lowercase : int =None
_lowercase : Union[str, Any] =st.sidebar.checkbox("Generation options")
if generate_options:
_lowercase : List[Any] ="\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
_lowercase : Tuple =st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
_lowercase : int =st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
_lowercase : str =st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
_lowercase : int =st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_lowercase : List[Any] =st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_lowercase : Optional[Any] =st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_lowercase : Any =None
# start main text
_lowercase : int =[
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
_lowercase : List[Any] =st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_lowercase : List[Any] =st.text_input("Enter your question here:", "")
else:
_lowercase : List[str] =question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
_lowercase , _lowercase : Dict =make_support(question, source=wiki_source, method="dense", n_results=10)
_lowercase , _lowercase : List[str] =make_support(question, source=wiki_source, method="sparse", n_results=10)
_lowercase : Optional[int] =[]
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_lowercase : Any =support_list[:10]
_lowercase : List[Any] ="<P> " + " <P> ".join([res[-1] for res in support_list])
else:
_lowercase , _lowercase : Optional[int] =make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_lowercase , _lowercase : str =answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
_lowercase : List[Any] ="https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
_lowercase : str =res[1].strip()
if sec_titles == "":
_lowercase : Tuple ="[{}]({})".format(res[0], wiki_url)
else:
_lowercase : Optional[Any] =sec_titles.split(" & ")
_lowercase : str =" & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
_lowercase : Optional[Any] =find_nearest_training(question)
_lowercase : Tuple =nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
_lowercase : str =[
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
_lowercase : Tuple ="\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 266 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_lowercase : str =logging.getLogger(__name__)
@dataclass
class snake_case__ (A__ ):
"""simple docstring"""
__lowerCAmelCase :Optional[float] = field(
default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
__lowerCAmelCase :bool = field(default=A__ , metadata={"help": "Whether to SortishSamler or not."} )
__lowerCAmelCase :bool = field(
default=A__ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
__lowerCAmelCase :bool = field(default=A__ , metadata={"help": "whether to use adafactor"} )
__lowerCAmelCase :Optional[float] = field(
default=A__ , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
__lowerCAmelCase :Optional[float] = field(
default=A__ , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
__lowerCAmelCase :Optional[float] = field(default=A__ , metadata={"help": "Dropout probability. Goes into model.config."} )
__lowerCAmelCase :Optional[float] = field(
default=A__ , metadata={"help": "Attention dropout probability. Goes into model.config."} )
__lowerCAmelCase :Optional[str] = field(
default="linear" , metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
| 266 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : List[str] = logging.get_logger(__name__)
A : Any = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = '''falcon'''
A__ = ['''past_key_values''']
def __init__(self : str , _UpperCAmelCase : Dict=6_5024 , _UpperCAmelCase : Optional[Any]=4544 , _UpperCAmelCase : Optional[int]=32 , _UpperCAmelCase : Optional[Any]=71 , _UpperCAmelCase : List[Any]=1E-5 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : str=True , _UpperCAmelCase : Tuple=0.0 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : str=None , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : int=False , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : Optional[int]=11 , _UpperCAmelCase : Optional[Any]=11 , **_UpperCAmelCase : Union[str, Any] , ) -> List[str]:
"""simple docstring"""
lowercase__ = vocab_size
# Backward compatibility with n_embed kwarg
lowercase__ = kwargs.pop("""n_embed""" , _UpperCAmelCase )
lowercase__ = hidden_size if n_embed is None else n_embed
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = layer_norm_epsilon
lowercase__ = initializer_range
lowercase__ = use_cache
lowercase__ = hidden_dropout
lowercase__ = attention_dropout
lowercase__ = bos_token_id
lowercase__ = eos_token_id
lowercase__ = num_attention_heads if num_kv_heads is None else num_kv_heads
lowercase__ = alibi
lowercase__ = new_decoder_architecture
lowercase__ = multi_query # Ignored when new_decoder_architecture is True
lowercase__ = parallel_attn
lowercase__ = bias
super().__init__(bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
@property
def lowerCamelCase__ (self : Tuple ) -> int:
"""simple docstring"""
return self.hidden_size // self.num_attention_heads
@property
def lowerCamelCase__ (self : List[str] ) -> Tuple:
"""simple docstring"""
return not self.alibi
| 305 |
def UpperCamelCase ( __magic_name__ : str ) -> int:
"""simple docstring"""
assert column_title.isupper()
lowercase__ = 0
lowercase__ = len(__magic_name__ ) - 1
lowercase__ = 0
while index >= 0:
lowercase__ = (ord(column_title[index] ) - 64) * pow(26 , __magic_name__ )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 305 | 1 |
import os
_A : Any = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 1_00, 'D': 5_00, 'M': 10_00}
def _a ( UpperCAmelCase ) -> int:
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = 0
lowerCamelCase__ : Union[str, Any] = 0
while index < len(UpperCAmelCase ) - 1:
lowerCamelCase__ : Optional[Any] = SYMBOLS[numerals[index]]
lowerCamelCase__ : Optional[int] = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def _a ( UpperCAmelCase ) -> str:
"""simple docstring"""
lowerCamelCase__ : Tuple = ''''''
lowerCamelCase__ : Optional[Any] = num // 1000
numerals += m_count * "M"
num %= 1000
lowerCamelCase__ : Tuple = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
lowerCamelCase__ : Any = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def _a ( UpperCAmelCase = "/p089_roman.txt" ) -> int:
"""simple docstring"""
lowerCamelCase__ : Dict = 0
with open(os.path.dirname(UpperCAmelCase ) + roman_numerals_filename ) as filea:
lowerCamelCase__ : Optional[Any] = filea.readlines()
for line in lines:
lowerCamelCase__ : List[str] = line.strip()
lowerCamelCase__ : Union[str, Any] = parse_roman_numerals(UpperCAmelCase )
lowerCamelCase__ : Dict = generate_roman_numerals(UpperCAmelCase )
savings += len(UpperCAmelCase ) - len(UpperCAmelCase )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 351 |
from __future__ import annotations
_A : List[str] = '#'
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] ) ->None:
lowerCamelCase__ : dict = {}
def __lowerCamelCase ( self : Union[str, Any] , A : str ) ->None:
lowerCamelCase__ : Any = self._trie
for char in text:
if char not in trie:
lowerCamelCase__ : Any = {}
lowerCamelCase__ : Any = trie[char]
lowerCamelCase__ : List[str] = True
def __lowerCamelCase ( self : List[Any] , A : str ) ->tuple | list:
lowerCamelCase__ : Dict = self._trie
for char in prefix:
if char in trie:
lowerCamelCase__ : List[Any] = trie[char]
else:
return []
return self._elements(A )
def __lowerCamelCase ( self : Dict , A : dict ) ->tuple:
lowerCamelCase__ : Optional[Any] = []
for c, v in d.items():
lowerCamelCase__ : Any = [''' '''] if c == END else [(c + s) for s in self._elements(A )]
result.extend(A )
return tuple(A )
_A : str = Trie()
_A : List[Any] = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
trie.insert_word(word)
def _a ( UpperCAmelCase ) -> tuple:
"""simple docstring"""
lowerCamelCase__ : Optional[int] = trie.find_word(UpperCAmelCase )
return tuple(string + word for word in suffixes )
def _a ( ) -> None:
"""simple docstring"""
print(autocomplete_using_trie('''de''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 265 | 0 |
'''simple docstring'''
def _UpperCamelCase ( __A ) -> list:
'''simple docstring'''
def merge(__A , __A ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(__A ) <= 1:
return collection
UpperCamelCase__ = len(__A ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ : List[str] = input('Enter numbers separated by a comma:\n').strip()
a__ : Tuple = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
| 80 | import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A : str = logging.get_logger(__name__)
def a__ ( __UpperCamelCase , __UpperCamelCase=False ):
SCREAMING_SNAKE_CASE_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
SCREAMING_SNAKE_CASE_ = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ):
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE_ = ""
else:
SCREAMING_SNAKE_CASE_ = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE_ = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
SCREAMING_SNAKE_CASE_ = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE_ = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE_ = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE_ = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE_ = in_proj_bias[-config.hidden_size :]
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = dct.pop(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = val
def a__ ( ):
SCREAMING_SNAKE_CASE_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE_ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def a__ ( __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = DeiTConfig()
# all deit models have fine-tuned heads
SCREAMING_SNAKE_CASE_ = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
SCREAMING_SNAKE_CASE_ = 1_0_0_0
SCREAMING_SNAKE_CASE_ = "huggingface/label-files"
SCREAMING_SNAKE_CASE_ = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type="dataset" ) , "r" ) )
SCREAMING_SNAKE_CASE_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = idalabel
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = int(deit_name[-6:-4] )
SCREAMING_SNAKE_CASE_ = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
SCREAMING_SNAKE_CASE_ = 1_9_2
SCREAMING_SNAKE_CASE_ = 7_6_8
SCREAMING_SNAKE_CASE_ = 1_2
SCREAMING_SNAKE_CASE_ = 3
elif deit_name[9:].startswith("small" ):
SCREAMING_SNAKE_CASE_ = 3_8_4
SCREAMING_SNAKE_CASE_ = 1_5_3_6
SCREAMING_SNAKE_CASE_ = 1_2
SCREAMING_SNAKE_CASE_ = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
SCREAMING_SNAKE_CASE_ = 1_0_2_4
SCREAMING_SNAKE_CASE_ = 4_0_9_6
SCREAMING_SNAKE_CASE_ = 2_4
SCREAMING_SNAKE_CASE_ = 1_6
# load original model from timm
SCREAMING_SNAKE_CASE_ = timm.create_model(__UpperCamelCase , pretrained=__UpperCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE_ = timm_model.state_dict()
SCREAMING_SNAKE_CASE_ = create_rename_keys(__UpperCamelCase , __UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
read_in_q_k_v(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# load HuggingFace model
SCREAMING_SNAKE_CASE_ = DeiTForImageClassificationWithTeacher(__UpperCamelCase ).eval()
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by DeiTImageProcessor
SCREAMING_SNAKE_CASE_ = int(
(2_5_6 / 2_2_4) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
SCREAMING_SNAKE_CASE_ = DeiTImageProcessor(size=__UpperCamelCase , crop_size=config.image_size )
SCREAMING_SNAKE_CASE_ = image_processor(images=prepare_img() , return_tensors="pt" )
SCREAMING_SNAKE_CASE_ = encoding["pixel_values"]
SCREAMING_SNAKE_CASE_ = model(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = timm_model(__UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__UpperCamelCase , outputs.logits , atol=1E-3 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
A : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
A : Dict = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 118 | 0 |
'''simple docstring'''
lowerCamelCase_ = tuple[float, float, float]
lowerCamelCase_ = tuple[float, float, float]
def __lowercase ( __lowercase , __lowercase ) -> Vectorad:
'''simple docstring'''
_A = end_pointa[0] - end_pointa[0]
_A = end_pointa[1] - end_pointa[1]
_A = end_pointa[2] - end_pointa[2]
return (x, y, z)
def __lowercase ( __lowercase , __lowercase ) -> Vectorad:
'''simple docstring'''
_A = ab[1] * ac[2] - ab[2] * ac[1] # *i
_A = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
_A = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def __lowercase ( __lowercase , __lowercase ) -> bool:
'''simple docstring'''
return tuple(round(__lowercase , __lowercase ) for x in vector ) == (0, 0, 0)
def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase = 10 ) -> bool:
'''simple docstring'''
_A = create_vector(__lowercase , __lowercase )
_A = create_vector(__lowercase , __lowercase )
return is_zero_vector(get_ad_vectors_cross(__lowercase , __lowercase ) , __lowercase )
| 360 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = '''camembert'''
def __init__( self : Union[str, Any] , __UpperCAmelCase : int=30522 , __UpperCAmelCase : List[str]=768 , __UpperCAmelCase : Optional[Any]=12 , __UpperCAmelCase : Optional[Any]=12 , __UpperCAmelCase : Dict=3072 , __UpperCAmelCase : Any="gelu" , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : List[str]=0.1 , __UpperCAmelCase : List[Any]=512 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : List[str]=0.02 , __UpperCAmelCase : List[str]=1E-12 , __UpperCAmelCase : Any=1 , __UpperCAmelCase : Optional[Any]=0 , __UpperCAmelCase : str=2 , __UpperCAmelCase : int="absolute" , __UpperCAmelCase : Any=True , __UpperCAmelCase : int=None , **__UpperCAmelCase : List[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = hidden_act
_A = intermediate_size
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = initializer_range
_A = layer_norm_eps
_A = position_embedding_type
_A = use_cache
_A = classifier_dropout
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
@property
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
if self.task == "multiple-choice":
_A = {0: "batch", 1: "choice", 2: "sequence"}
else:
_A = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 174 | 0 |
class _a :
"""simple docstring"""
def __init__( self: Union[str, Any] , __lowerCamelCase: int , __lowerCamelCase: Tuple=None , __lowerCamelCase: Optional[Any]=None ):
'''simple docstring'''
UpperCamelCase__: Any = data
UpperCamelCase__: Tuple = previous
UpperCamelCase__: Any = next_node
def __str__( self: str ):
'''simple docstring'''
return F"{self.data}"
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return self.data
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
return self.next
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
return self.previous
class _a :
"""simple docstring"""
def __init__( self: List[str] , __lowerCamelCase: str ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = head
def __iter__( self: Optional[int] ):
'''simple docstring'''
return self
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
if not self.current:
raise StopIteration
else:
UpperCamelCase__: Tuple = self.current.get_data()
UpperCamelCase__: str = self.current.get_next()
return value
class _a :
"""simple docstring"""
def __init__( self: List[Any] ):
'''simple docstring'''
UpperCamelCase__: List[str] = None # First node in list
UpperCamelCase__: str = None # Last node in list
def __str__( self: List[Any] ):
'''simple docstring'''
UpperCamelCase__: Dict = self.head
UpperCamelCase__: int = []
while current is not None:
nodes.append(current.get_data() )
UpperCamelCase__: Optional[Any] = current.get_next()
return " ".join(str(__lowerCamelCase ) for node in nodes )
def __contains__( self: List[str] , __lowerCamelCase: int ):
'''simple docstring'''
UpperCamelCase__: Any = self.head
while current:
if current.get_data() == value:
return True
UpperCamelCase__: int = current.get_next()
return False
def __iter__( self: List[Any] ):
'''simple docstring'''
return LinkedListIterator(self.head )
def UpperCAmelCase_ ( self: List[str] ):
'''simple docstring'''
if self.head:
return self.head.get_data()
return None
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
if self.tail:
return self.tail.get_data()
return None
def UpperCAmelCase_ ( self: List[str] , __lowerCamelCase: Node ):
'''simple docstring'''
if self.head is None:
UpperCamelCase__: List[str] = node
UpperCamelCase__: List[str] = node
else:
self.insert_before_node(self.head , __lowerCamelCase )
def UpperCAmelCase_ ( self: Any , __lowerCamelCase: Node ):
'''simple docstring'''
if self.head is None:
self.set_head(__lowerCamelCase )
else:
self.insert_after_node(self.tail , __lowerCamelCase )
def UpperCAmelCase_ ( self: Dict , __lowerCamelCase: int ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = Node(__lowerCamelCase )
if self.head is None:
self.set_head(__lowerCamelCase )
else:
self.set_tail(__lowerCamelCase )
def UpperCAmelCase_ ( self: Tuple , __lowerCamelCase: Node , __lowerCamelCase: Node ):
'''simple docstring'''
UpperCamelCase__: Tuple = node
UpperCamelCase__: int = node.previous
if node.get_previous() is None:
UpperCamelCase__: List[str] = node_to_insert
else:
UpperCamelCase__: Union[str, Any] = node_to_insert
UpperCamelCase__: Dict = node_to_insert
def UpperCAmelCase_ ( self: Dict , __lowerCamelCase: Node , __lowerCamelCase: Node ):
'''simple docstring'''
UpperCamelCase__: List[Any] = node
UpperCamelCase__: Dict = node.next
if node.get_next() is None:
UpperCamelCase__: Optional[int] = node_to_insert
else:
UpperCamelCase__: Optional[int] = node_to_insert
UpperCamelCase__: Any = node_to_insert
def UpperCAmelCase_ ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: int ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = 1
UpperCamelCase__: Dict = Node(__lowerCamelCase )
UpperCamelCase__: Dict = self.head
while node:
if current_position == position:
self.insert_before_node(__lowerCamelCase , __lowerCamelCase )
return
current_position += 1
UpperCamelCase__: Dict = node.next
self.insert_after_node(self.tail , __lowerCamelCase )
def UpperCAmelCase_ ( self: List[Any] , __lowerCamelCase: int ):
'''simple docstring'''
UpperCamelCase__: Any = self.head
while node:
if node.get_data() == item:
return node
UpperCamelCase__: str = node.get_next()
raise Exception("Node not found" )
def UpperCAmelCase_ ( self: Union[str, Any] , __lowerCamelCase: Any ):
'''simple docstring'''
if (node := self.get_node(__lowerCamelCase )) is not None:
if node == self.head:
UpperCamelCase__: List[Any] = self.head.get_next()
if node == self.tail:
UpperCamelCase__: Union[str, Any] = self.tail.get_previous()
self.remove_node_pointers(__lowerCamelCase )
@staticmethod
def UpperCAmelCase_ ( __lowerCamelCase: Node ):
'''simple docstring'''
if node.get_next():
UpperCamelCase__: List[str] = node.previous
if node.get_previous():
UpperCamelCase__: Union[str, Any] = node.next
UpperCamelCase__: Union[str, Any] = None
UpperCamelCase__: int = None
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
return self.head is None
def lowerCAmelCase_ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 149 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
A__: str = logging.get_logger(__name__)
def lowerCAmelCase_ ( A_ ,A_ ,A_):
return [
int(10_00 * (box[0] / width)),
int(10_00 * (box[1] / height)),
int(10_00 * (box[2] / width)),
int(10_00 * (box[3] / height)),
]
def lowerCAmelCase_ ( A_ ,A_ ,A_ = None):
UpperCamelCase__: List[str] = tesseract_config if tesseract_config is not None else ""
# apply OCR
UpperCamelCase__: Optional[int] = to_pil_image(A_)
UpperCamelCase__ , UpperCamelCase__: Tuple = pil_image.size
UpperCamelCase__: List[Any] = pytesseract.image_to_data(A_ ,lang=A_ ,output_type="dict" ,config=A_)
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__: Dict = data["text"], data["left"], data["top"], data["width"], data["height"]
# filter empty words and corresponding coordinates
UpperCamelCase__: List[Any] = [idx for idx, word in enumerate(A_) if not word.strip()]
UpperCamelCase__: Union[str, Any] = [word for idx, word in enumerate(A_) if idx not in irrelevant_indices]
UpperCamelCase__: Dict = [coord for idx, coord in enumerate(A_) if idx not in irrelevant_indices]
UpperCamelCase__: List[Any] = [coord for idx, coord in enumerate(A_) if idx not in irrelevant_indices]
UpperCamelCase__: Optional[int] = [coord for idx, coord in enumerate(A_) if idx not in irrelevant_indices]
UpperCamelCase__: Optional[Any] = [coord for idx, coord in enumerate(A_) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
UpperCamelCase__: List[str] = []
for x, y, w, h in zip(A_ ,A_ ,A_ ,A_):
UpperCamelCase__: str = [x, y, x + w, y + h]
actual_boxes.append(A_)
# finally, normalize the bounding boxes
UpperCamelCase__: Union[str, Any] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(A_ ,A_ ,A_))
assert len(A_) == len(A_), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class _a ( UpperCamelCase__):
"""simple docstring"""
UpperCamelCase__ = ["""pixel_values"""]
def __init__( self: int , __lowerCamelCase: bool = True , __lowerCamelCase: Dict[str, int] = None , __lowerCamelCase: PILImageResampling = PILImageResampling.BILINEAR , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[str] = None , __lowerCamelCase: Optional[str] = "" , **__lowerCamelCase: str , ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
UpperCamelCase__: Optional[Any] = size if size is not None else {"height": 224, "width": 224}
UpperCamelCase__: Dict = get_size_dict(__lowerCamelCase )
UpperCamelCase__: Optional[Any] = do_resize
UpperCamelCase__: Optional[int] = size
UpperCamelCase__: int = resample
UpperCamelCase__: str = apply_ocr
UpperCamelCase__: List[Any] = ocr_lang
UpperCamelCase__: List[Any] = tesseract_config
def UpperCAmelCase_ ( self: int , __lowerCamelCase: np.ndarray , __lowerCamelCase: Dict[str, int] , __lowerCamelCase: PILImageResampling = PILImageResampling.BILINEAR , __lowerCamelCase: Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase: str , ):
'''simple docstring'''
UpperCamelCase__: int = get_size_dict(__lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
UpperCamelCase__: int = (size["height"], size["width"])
return resize(__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def UpperCAmelCase_ ( self: Dict , __lowerCamelCase: ImageInput , __lowerCamelCase: bool = None , __lowerCamelCase: Dict[str, int] = None , __lowerCamelCase: PILImageResampling = None , __lowerCamelCase: bool = None , __lowerCamelCase: Optional[str] = None , __lowerCamelCase: Optional[str] = None , __lowerCamelCase: Optional[Union[str, TensorType]] = None , __lowerCamelCase: ChannelDimension = ChannelDimension.FIRST , **__lowerCamelCase: str , ):
'''simple docstring'''
UpperCamelCase__: str = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__: Any = size if size is not None else self.size
UpperCamelCase__: Union[str, Any] = get_size_dict(__lowerCamelCase )
UpperCamelCase__: Tuple = resample if resample is not None else self.resample
UpperCamelCase__: int = apply_ocr if apply_ocr is not None else self.apply_ocr
UpperCamelCase__: Any = ocr_lang if ocr_lang is not None else self.ocr_lang
UpperCamelCase__: Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config
UpperCamelCase__: Any = make_list_of_images(__lowerCamelCase )
if not valid_images(__lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
# All transformations expect numpy arrays.
UpperCamelCase__: Union[str, Any] = [to_numpy_array(__lowerCamelCase ) for image in images]
if apply_ocr:
requires_backends(self , "pytesseract" )
UpperCamelCase__: str = []
UpperCamelCase__: Optional[Any] = []
for image in images:
UpperCamelCase__ , UpperCamelCase__: Any = apply_tesseract(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
words_batch.append(__lowerCamelCase )
boxes_batch.append(__lowerCamelCase )
if do_resize:
UpperCamelCase__: List[Any] = [self.resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
UpperCamelCase__: List[str] = [flip_channel_order(__lowerCamelCase ) for image in images]
UpperCamelCase__: Optional[Any] = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images]
UpperCamelCase__: int = BatchFeature(data={"pixel_values": images} , tensor_type=__lowerCamelCase )
if apply_ocr:
UpperCamelCase__: Dict = words_batch
UpperCamelCase__: Tuple = boxes_batch
return data
| 149 | 1 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase ( a__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[int] = RobertaTokenizer
A_ : Any = RobertaTokenizerFast
A_ : Dict = True
A_ : Tuple = {'cls_token': '<s>'}
def _UpperCAmelCase ( self ) -> Dict:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_a = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_a = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
_a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_a = {'''unk_token''': '''<unk>'''}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__UpperCAmelCase ) )
def _UpperCAmelCase ( self , **__UpperCAmelCase ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def _UpperCAmelCase ( self , **__UpperCAmelCase ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]:
_a = '''lower newer'''
_a = '''lower newer'''
return input_text, output_text
def _UpperCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
_a = '''lower newer'''
_a = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
_a = tokenizer.tokenize(__UpperCAmelCase ) # , add_prefix_space=True)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_a = tokens + [tokenizer.unk_token]
_a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__UpperCAmelCase ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__UpperCAmelCase ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class.from_pretrained('''roberta-base''' )
_a = tokenizer.encode('''sequence builders''' , add_special_tokens=__UpperCAmelCase )
_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__UpperCAmelCase )
_a = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
_a = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
_a = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase )
_a = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_tokenizer()
_a = '''Encode this sequence.'''
_a = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
_a = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
_a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase )
_a = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
_a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
_a = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
_a = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase )
# Testing spaces after special tokens
_a = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase )} ) # mask token has a left space
_a = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
_a = '''Encode <mask> sequence'''
_a = '''Encode <mask>sequence'''
_a = tokenizer.encode(__UpperCAmelCase )
_a = encoded.index(__UpperCAmelCase )
_a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
_a = tokenizer.encode(__UpperCAmelCase )
_a = encoded.index(__UpperCAmelCase )
_a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( self ) -> Optional[int]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
_a = self.tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
_a = '''A, <mask> AllenNLP sentence.'''
_a = tokenizer_r.encode_plus(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
_a = tokenizer_p.encode_plus(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_a = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_a = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__UpperCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__UpperCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def _UpperCAmelCase ( self ) -> Any:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
_a = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __UpperCAmelCase )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __UpperCAmelCase )
self.assertEqual(post_processor_state['''trim_offsets'''] , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
_a = F'{text_of_1_token} {text_of_1_token}'
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__UpperCAmelCase ) + 1, len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__UpperCAmelCase ) + 1, len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__UpperCAmelCase ), len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__UpperCAmelCase ), len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
_a = F' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ) + 1, 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ), 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
_a = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
_a = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ), 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , ) | 153 |
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=True , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> str:
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_multiple_size
_a = hidden_act
_a = hidden_dropout
_a = attention_dropout
_a = weight_tying
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = scope
def _UpperCAmelCase ( self ) -> Tuple:
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a , _a , _a , _a = self.prepare_config_and_inputs()
_a = True
return config, input_ids, input_mask, token_labels
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
_a = GPTNeoXJapaneseModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
_a = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
_a = True
_a = GPTNeoXJapaneseModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
_a = GPTNeoXJapaneseForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
_a = True
_a = GPTNeoXJapaneseForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
# first forward pass
_a = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase )
_a = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_a = ids_tensor((self.batch_size, 3) , config.vocab_size )
_a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_a = torch.cat([input_ids, next_tokens] , dim=-1 )
_a = torch.cat([input_mask, next_mask] , dim=-1 )
_a = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase )
_a = output_from_no_past['''hidden_states'''][0]
_a = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )['''hidden_states'''][0]
# select random slice
_a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_a = output_from_no_past[:, -3:, random_slice_idx].detach()
_a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
def _UpperCAmelCase ( self ) -> List[str]:
_a = self.prepare_config_and_inputs()
_a , _a , _a , _a = config_and_inputs
_a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
A_ : str = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
A_ : Tuple = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
A_ : List[str] = (
{'feature-extraction': GPTNeoXJapaneseModel, 'text-generation': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
A_ : Any = False
A_ : Optional[Any] = False
A_ : Tuple = False
A_ : Optional[int] = False
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = GPTNeoXJapaneseModelTester(self )
_a = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> str:
_a , _a , _a , _a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Tuple:
_a , _a , _a , _a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> int:
# This regression test was failing with PyTorch < 1.3
_a , _a , _a , _a = self.model_tester.prepare_config_and_inputs_for_decoder()
_a = None
self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[str]:
_a , _a , _a , _a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase )
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = '''abeja/gpt-neox-japanese-2.7b'''
_a = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
_a = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
_a = GPTNeoXJapaneseTokenizer.from_pretrained(__UpperCAmelCase )
_a = GPTNeoXJapaneseForCausalLM.from_pretrained(__UpperCAmelCase )
_a = []
for prompt in prompts:
_a = tokenizer(__UpperCAmelCase , return_tensors='''pt''' ).input_ids
_a = model.generate(__UpperCAmelCase , max_length=50 )
_a = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) | 153 | 1 |
from collections.abc import Sequence
def UpperCamelCase( __UpperCamelCase : Sequence[float] ,__UpperCamelCase : bool = False ):
if not arr:
return 0
lowerCAmelCase_ : Tuple = 0 if allow_empty_subarrays else float('''-inf''' )
lowerCAmelCase_ : Optional[Any] = 0.0
for num in arr:
lowerCAmelCase_ : Union[str, Any] = max(0 if allow_empty_subarrays else num ,curr_sum + num )
lowerCAmelCase_ : List[Any] = max(__UpperCamelCase ,__UpperCamelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
A__ : Optional[int] = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'''{max_subarray_sum(nums) = }''')
| 103 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
A__ : Tuple = get_logger(__name__)
class __snake_case :
_a = '''dummy_data'''
_a = '''datasets'''
_a = False
def __init__( self : Optional[Any] , A_ : str , A_ : str , A_ : Union[Version, str] , A_ : Optional[str] = None , A_ : bool = False , A_ : bool = True , A_ : Optional[List[Callable]] = None , ):
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : Any = dataset_name
lowerCAmelCase_ : Union[str, Any] = cache_dir
lowerCAmelCase_ : List[Any] = use_local_dummy_data
lowerCAmelCase_ : Optional[Any] = config
# download_callbacks take a single url as input
lowerCAmelCase_ : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowerCAmelCase_ : Tuple = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowerCAmelCase_ : int = str(A_)
# to be downloaded
lowerCAmelCase_ : Dict = None
lowerCAmelCase_ : Optional[int] = None
@property
def UpperCAmelCase__ ( self : List[str]):
if self._dummy_file is None:
lowerCAmelCase_ : int = self.download_dummy_data()
return self._dummy_file
@property
def UpperCAmelCase__ ( self : str):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name)
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name)
@property
def UpperCAmelCase__ ( self : str):
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''')
def UpperCAmelCase__ ( self : Any):
lowerCAmelCase_ : Any = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowerCAmelCase_ : Union[str, Any] = cached_path(
A_ , cache_dir=self.cache_dir , extract_compressed_file=A_ , force_extract=A_)
return os.path.join(A_ , self.dummy_file_name)
@property
def UpperCAmelCase__ ( self : List[str]):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file)
@property
def UpperCAmelCase__ ( self : Optional[int]):
if self._bucket_url is None:
lowerCAmelCase_ : str = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/'''))
return self._bucket_url
@property
def UpperCAmelCase__ ( self : List[Any]):
# return full path if its a dir
if os.path.isdir(self.dummy_file):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''').split('''/''')[:-1])
def UpperCAmelCase__ ( self : Union[str, Any] , A_ : Dict , *A_ : List[Any]):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowerCAmelCase_ : Union[str, Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowerCAmelCase_ : Optional[int] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(A_ , A_):
return self.create_dummy_data_dict(A_ , A_)
elif isinstance(A_ , (list, tuple)):
return self.create_dummy_data_list(A_ , A_)
else:
return self.create_dummy_data_single(A_ , A_)
def UpperCAmelCase__ ( self : Optional[int] , A_ : Tuple , *A_ : int):
return self.download_and_extract(A_)
def UpperCAmelCase__ ( self : Tuple , A_ : List[str] , A_ : Optional[Any]):
return self.download_and_extract(A_)
def UpperCAmelCase__ ( self : int , A_ : Optional[int] , *A_ : str , **A_ : List[Any]):
return path
def UpperCAmelCase__ ( self : Tuple):
return {}
def UpperCAmelCase__ ( self : Optional[Any] , A_ : Union[str, Any] , A_ : List[Any]):
lowerCAmelCase_ : Union[str, Any] = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(A_ , A_):
for single_url in single_urls:
download_callback(A_)
else:
lowerCAmelCase_ : Any = single_urls
download_callback(A_)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(A_ , A_):
lowerCAmelCase_ : Any = [os.path.join(A_ , urllib.parse.quote_plus(Path(A_).name)) for x in single_urls]
else:
lowerCAmelCase_ : Optional[int] = single_urls
lowerCAmelCase_ : List[str] = os.path.join(A_ , urllib.parse.quote_plus(Path(A_).name))
lowerCAmelCase_ : Dict = value
# make sure that values are unique
if all(isinstance(A_ , A_) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
dummy_data_dict.values()):
# append key to value to make its name unique
lowerCAmelCase_ : Tuple = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def UpperCAmelCase__ ( self : Dict , A_ : List[str] , A_ : str):
lowerCAmelCase_ : Optional[Any] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowerCAmelCase_ : str = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , A_)) for url in data_url)
lowerCAmelCase_ : Optional[Any] = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''') for url in data_url)
if data_url and (is_tf_records or is_pubmed_records):
lowerCAmelCase_ : Any = [data_url[0]] * len(A_)
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(A_)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowerCAmelCase_ : int = os.path.join(A_ , urllib.parse.quote_plus(single_url.split('''/''')[-1]))
dummy_data_list.append(A_)
return dummy_data_list
def UpperCAmelCase__ ( self : List[str] , A_ : Optional[Any] , A_ : Tuple):
for download_callback in self.download_callbacks:
download_callback(A_)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowerCAmelCase_ : Tuple = os.path.join(A_ , urllib.parse.quote_plus(data_url.split('''/''')[-1]))
if os.path.exists(A_) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def UpperCAmelCase__ ( self : int):
pass
def UpperCAmelCase__ ( self : Optional[int]):
pass
def UpperCAmelCase__ ( self : List[str] , A_ : str):
def _iter_archive_members(A_ : Any):
# this preserves the order of the members inside the ZIP archive
lowerCAmelCase_ : Optional[int] = Path(self.dummy_file).parent
lowerCAmelCase_ : Optional[int] = path.relative_to(A_)
with ZipFile(self.local_path_to_dummy_data) as zip_file:
lowerCAmelCase_ : Tuple = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix()):
yield dummy_parent_path.joinpath(A_)
lowerCAmelCase_ : List[Any] = Path(A_)
lowerCAmelCase_ : Optional[int] = _iter_archive_members(A_) if self.use_local_dummy_data else path.rglob('''*''')
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''')):
yield file_path.relative_to(A_).as_posix(), file_path.open('''rb''')
def UpperCAmelCase__ ( self : Dict , A_ : Any):
if not isinstance(A_ , A_):
lowerCAmelCase_ : Dict = [paths]
for path in paths:
if os.path.isfile(A_):
if os.path.basename(A_).startswith(('''.''', '''__''')):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(A_):
if os.path.basename(A_).startswith(('''.''', '''__''')):
continue
dirnames.sort()
for filename in sorted(A_):
if filename.startswith(('''.''', '''__''')):
continue
yield os.path.join(A_ , A_)
| 103 | 1 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
lowerCAmelCase_ = ["text", "image", "audio"]
def __magic_name__ ( A ) -> Optional[int]:
snake_case = []
for input_type in input_types:
if input_type == "text":
inputs.append('Text input' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((5_1_2, 5_1_2) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_0_0_0 ) )
elif isinstance(A , A ):
inputs.append(create_inputs(A ) )
else:
raise ValueError(F'''Invalid type requested: {input_type}''' )
return inputs
def __magic_name__ ( A ) -> int:
snake_case = []
for output in outputs:
if isinstance(A , (str, AgentText) ):
output_types.append('text' )
elif isinstance(A , (Image.Image, AgentImage) ):
output_types.append('image' )
elif isinstance(A , (torch.Tensor, AgentAudio) ):
output_types.append('audio' )
else:
raise ValueError(F'''Invalid output: {output}''' )
return output_types
@is_tool_test
class lowerCamelCase :
def _lowerCamelCase ( self ) -> Any:
self.assertTrue(hasattr(self.tool, 'inputs' ) )
self.assertTrue(hasattr(self.tool, 'outputs' ) )
snake_case = self.tool.inputs
for _input in inputs:
if isinstance(_input, lowercase_ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
snake_case = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def _lowerCamelCase ( self ) -> List[str]:
snake_case = create_inputs(self.tool.inputs )
snake_case = self.tool(*lowercase_ )
# There is a single output
if len(self.tool.outputs ) == 1:
snake_case = [outputs]
self.assertListEqual(output_types(lowercase_ ), self.tool.outputs )
def _lowerCamelCase ( self ) -> Optional[int]:
self.assertTrue(hasattr(self.tool, 'description' ) )
self.assertTrue(hasattr(self.tool, 'default_checkpoint' ) )
self.assertTrue(self.tool.description.startswith('This is a tool that' ) )
def _lowerCamelCase ( self ) -> Any:
snake_case = create_inputs(self.tool.inputs )
snake_case = self.tool(*lowercase_ )
if not isinstance(lowercase_, lowercase_ ):
snake_case = [outputs]
self.assertEqual(len(lowercase_ ), len(self.tool.outputs ) )
for output, output_type in zip(lowercase_, self.tool.outputs ):
snake_case = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowercase_, lowercase_ ) )
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = create_inputs(self.tool.inputs )
snake_case = []
for _input, input_type in zip(lowercase_, self.tool.inputs ):
if isinstance(lowercase_, lowercase_ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
snake_case = self.tool(*lowercase_ )
if not isinstance(lowercase_, lowercase_ ):
snake_case = [outputs]
self.assertEqual(len(lowercase_ ), len(self.tool.outputs ) )
| 332 |
'''simple docstring'''
def __magic_name__ ( A ) -> float:
return 1_0 - x * x
def __magic_name__ ( A , A ) -> float:
# Bolzano theory in order to find if there is a root between a and b
if equation(A ) * equation(A ) >= 0:
raise ValueError('Wrong space!' )
snake_case = a
while (b - a) >= 0.01:
# Find middle point
snake_case = (a + b) / 2
# Check if middle point is root
if equation(A ) == 0.0:
break
# Decide the side to repeat the steps
if equation(A ) * equation(A ) < 0:
snake_case = c
else:
snake_case = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 332 | 1 |
from __future__ import annotations
def lowerCAmelCase ( _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if partitions <= 0:
raise ValueError("partitions must be a positive number!" )
if partitions > number_of_bytes:
raise ValueError("partitions can not > number_of_bytes!" )
UpperCAmelCase__ = number_of_bytes // partitions
UpperCAmelCase__ = []
for i in range(_lowerCAmelCase ):
UpperCAmelCase__ = i * bytes_per_partition + 1
UpperCAmelCase__ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(F'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 169 |
def lowerCAmelCase ( _lowerCAmelCase : str ):
"""simple docstring"""
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(_lowerCAmelCase ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__("doctest").testmod()
| 169 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( ):
return [list(range(1_000 - i, -1_000 - i, -1 ) ) for i in range(1_000 )]
__a = generate_large_matrix()
__a = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __UpperCAmelCase ( a_: list[list[int]] ):
assert all(row == sorted(a_, reverse=a_ ) for row in grid )
assert all(list(a_ ) == sorted(a_, reverse=a_ ) for col in zip(*a_ ) )
def __UpperCAmelCase ( a_: list[int] ):
_UpperCAmelCase : Optional[Any] = 0
_UpperCAmelCase : str = len(a_ ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
_UpperCAmelCase : List[str] = (left + right) // 2
_UpperCAmelCase : Tuple = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
_UpperCAmelCase : Dict = mid + 1
else:
_UpperCAmelCase : Optional[Any] = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(a_ )
def __UpperCAmelCase ( a_: list[list[int]] ):
_UpperCAmelCase : str = 0
_UpperCAmelCase : int = len(grid[0] )
for i in range(len(a_ ) ):
_UpperCAmelCase : Dict = find_negative_index(grid[i][:bound] )
total += bound
return (len(a_ ) * len(grid[0] )) - total
def __UpperCAmelCase ( a_: list[list[int]] ):
return len([number for row in grid for number in row if number < 0] )
def __UpperCAmelCase ( a_: list[list[int]] ):
_UpperCAmelCase : Union[str, Any] = 0
for row in grid:
for i, number in enumerate(a_ ):
if number < 0:
total += len(a_ ) - i
break
return total
def __UpperCAmelCase ( ):
from timeit import timeit
print("Running benchmarks" )
_UpperCAmelCase : Optional[Any] = (
"from __main__ import count_negatives_binary_search, "
"count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
_UpperCAmelCase : List[Any] = timeit(f"""{func}(grid=grid)""", setup=a_, number=500 )
print(f"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark() | 17 | '''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] = (EulerDiscreteScheduler,)
UpperCamelCase_ : Tuple = 10
def _lowerCAmelCase ( self : Dict , **lowerCAmelCase__ : Tuple ) -> Any:
"""simple docstring"""
_UpperCAmelCase : str = {
"num_train_timesteps": 1_1_0_0,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**lowerCAmelCase__ )
return config
def _lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase__ )
def _lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ )
def _lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCAmelCase__ )
def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase__ )
def _lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : List[str] = self.scheduler_classes[0]
_UpperCAmelCase : int = self.get_scheduler_config()
_UpperCAmelCase : Optional[int] = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCAmelCase : int = torch.manual_seed(0 )
_UpperCAmelCase : Any = self.dummy_model()
_UpperCAmelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCAmelCase : List[Any] = sample.to(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase : List[str] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : int = model(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : int = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = output.prev_sample
_UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) )
_UpperCAmelCase : Tuple = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Any = self.scheduler_classes[0]
_UpperCAmelCase : List[Any] = self.get_scheduler_config(prediction_type="v_prediction" )
_UpperCAmelCase : Any = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCAmelCase : str = torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = self.dummy_model()
_UpperCAmelCase : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCAmelCase : Tuple = sample.to(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase : Union[str, Any] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : int = model(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = output.prev_sample
_UpperCAmelCase : Tuple = torch.sum(torch.abs(lowerCAmelCase__ ) )
_UpperCAmelCase : Any = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 0.0002 ) < 1e-2
assert abs(result_mean.item() - 2.26_76e-06 ) < 1e-3
def _lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = self.scheduler_classes[0]
_UpperCAmelCase : List[Any] = self.get_scheduler_config()
_UpperCAmelCase : int = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
_UpperCAmelCase : str = self.dummy_model()
_UpperCAmelCase : Any = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_UpperCAmelCase : str = sample.to(lowerCAmelCase__ )
for t in scheduler.timesteps:
_UpperCAmelCase : List[str] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Any = model(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Tuple = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
_UpperCAmelCase : int = output.prev_sample
_UpperCAmelCase : List[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) )
_UpperCAmelCase : str = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def _lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
_UpperCAmelCase : List[Any] = self.scheduler_classes[0]
_UpperCAmelCase : int = self.get_scheduler_config()
_UpperCAmelCase : Union[str, Any] = scheduler_class(**lowerCAmelCase__ , use_karras_sigmas=lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
_UpperCAmelCase : List[str] = self.dummy_model()
_UpperCAmelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_UpperCAmelCase : Optional[int] = sample.to(lowerCAmelCase__ )
for t in scheduler.timesteps:
_UpperCAmelCase : List[Any] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : str = model(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = output.prev_sample
_UpperCAmelCase : List[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) )
_UpperCAmelCase : Optional[Any] = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1e-2
assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1e-3 | 17 | 1 |
'''simple docstring'''
def UpperCamelCase_ ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> float:
"""simple docstring"""
def get_matched_characters(_UpperCAmelCase : str , _UpperCAmelCase : str ) -> str:
_UpperCAmelCase : Tuple = []
_UpperCAmelCase : Dict = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
_UpperCAmelCase : int = int(max(0 , i - limit ) )
_UpperCAmelCase : Any = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(_UpperCAmelCase )
_UpperCAmelCase : List[Any] = F"""{_stra[0:_stra.index(_UpperCAmelCase )]} {_stra[_stra.index(_UpperCAmelCase ) + 1:]}"""
return "".join(_UpperCAmelCase )
# matching characters
_UpperCAmelCase : Union[str, Any] = get_matched_characters(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase : Tuple = get_matched_characters(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase : Tuple = len(_UpperCAmelCase )
# transposition
_UpperCAmelCase : Optional[Any] = (
len([(ca, ca) for ca, ca in zip(_UpperCAmelCase , _UpperCAmelCase ) if ca != ca] ) // 2
)
if not match_count:
_UpperCAmelCase : Dict = 0.0
else:
_UpperCAmelCase : Optional[int] = (
1
/ 3
* (
match_count / len(_UpperCAmelCase )
+ match_count / len(_UpperCAmelCase )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
_UpperCAmelCase : str = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
| 31 | '''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = None
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Union[str, Any] = """▁"""
__SCREAMING_SNAKE_CASE : str = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
__SCREAMING_SNAKE_CASE : int = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""},
"""tokenizer_file""": {
"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"""
},
}
__SCREAMING_SNAKE_CASE : str = {
"""google/pegasus-xsum""": 512,
}
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[int] = VOCAB_FILES_NAMES
__UpperCamelCase: Dict = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase: List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase: Optional[int] = PegasusTokenizer
__UpperCamelCase: Optional[Any] = ["input_ids", "attention_mask"]
def __init__( self : Dict , A : List[str]=None , A : Union[str, Any]=None , A : Optional[int]="<pad>" , A : Tuple="</s>" , A : Union[str, Any]="<unk>" , A : Union[str, Any]="<mask_2>" , A : Dict="<mask_1>" , A : Union[str, Any]=None , A : int=103 , **A : Optional[Any] , ):
_UpperCAmelCase : Dict = offset
if additional_special_tokens is not None:
if not isinstance(A , A ):
raise TypeError(
F"""additional_special_tokens should be of type {type(A )}, but is"""
F""" {type(A )}""" )
_UpperCAmelCase : Optional[int] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"""<unk_{i}>""" for i in range(len(A ) , self.offset - 1 )
]
if len(set(A ) ) != len(A ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
F""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
_UpperCAmelCase : Any = additional_special_tokens_extended
else:
_UpperCAmelCase : Dict = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"""<unk_{i}>""" for i in range(2 , self.offset )]
super().__init__(
A , tokenizer_file=A , pad_token=A , eos_token=A , unk_token=A , mask_token=A , mask_token_sent=A , offset=A , additional_special_tokens=A , **A , )
_UpperCAmelCase : Optional[Any] = vocab_file
_UpperCAmelCase : Optional[Any] = False if not self.vocab_file else True
def _A ( self : List[str] , A : Optional[Any] ):
_UpperCAmelCase : Any = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"There should be 3 special tokens: mask_token, pad_token, and eos_token +"
F""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
return [1 if x in all_special_ids else 0 for x in seq]
def _A ( self : str , A : List , A : Optional[List] = None , A : bool = False ):
if already_has_special_tokens:
return self._special_token_mask(A )
elif token_ids_a is None:
return self._special_token_mask(A ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def _A ( self : Optional[int] , A : Union[str, Any] , A : int=None ):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _A ( self : Union[str, Any] , A : str , A : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCAmelCase : List[Any] = os.path.join(
A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
| 31 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
UpperCAmelCase_ = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['DPTFeatureExtractor']
UpperCAmelCase_ = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 61 |
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
UpperCAmelCase_ = logging.get_logger(__name__)
def _UpperCamelCase ( ):
'''simple docstring'''
UpperCAmelCase__ = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
UpperCAmelCase__ = json.loads(SCREAMING_SNAKE_CASE__ )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
UpperCAmelCase__ = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
UpperCAmelCase__ = json.loads(SCREAMING_SNAKE_CASE__ )
if not mpi_options.get("""sagemaker_mpi_enabled""" , SCREAMING_SNAKE_CASE__ ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("""smdistributed""" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase_ : str = field(
default="""""" , metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} , )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
super().__post_init__()
warnings.warn(
"""`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
"""`TrainingArguments` instead.""" , _UpperCAmelCase , )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
logger.info("""PyTorch: setting up devices""" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
if self.no_cuda:
UpperCAmelCase__ = torch.device("""cpu""" )
UpperCAmelCase__ = 0
elif is_sagemaker_model_parallel_available():
UpperCAmelCase__ = smp.local_rank()
UpperCAmelCase__ = torch.device("""cuda""" , _UpperCAmelCase )
UpperCAmelCase__ = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
UpperCAmelCase__ = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
UpperCAmelCase__ = torch.device("""cuda""" , self.local_rank )
UpperCAmelCase__ = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
UpperCAmelCase__ = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
UpperCAmelCase__ = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
UpperCAmelCase__ = torch.device("""cuda""" , self.local_rank )
UpperCAmelCase__ = 1
if device.type == "cuda":
torch.cuda.set_device(_UpperCAmelCase )
return device
@property
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
return not is_sagemaker_model_parallel_available()
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
return False
| 61 | 1 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class __lowerCamelCase :
'''simple docstring'''
def __init__( self : List[str] ):
lowerCAmelCase_ : Dict = psutil.Process()
lowerCAmelCase_ : int = False
def lowerCamelCase ( self : Dict ):
lowerCAmelCase_ : List[Any] = -1
while True:
lowerCAmelCase_ : Any = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def lowerCamelCase ( self : List[str] ):
lowerCAmelCase_ : str = True
lowerCAmelCase_ : Any = threading.Thread(target=self.peak_monitor )
lowerCAmelCase_ : str = True
self.thread.start()
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ : List[str] = False
self.thread.join()
return self.cpu_memory_peak
lowercase__ = PeakCPUMemory()
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase_ : str = {"time": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
lowerCAmelCase_ : int = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
lowerCAmelCase_ : Union[str, Any] = torch.cuda.memory_allocated(__UpperCamelCase )
torch.cuda.reset_peak_memory_stats()
return measures
def __lowerCamelCase ( __UpperCamelCase ) -> str:
"""simple docstring"""
lowerCAmelCase_ : int = {"time": time.time() - start_measures["time"]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
lowerCAmelCase_ : List[str] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
lowerCAmelCase_ : Union[str, Any] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
lowerCAmelCase_ : Tuple = (torch.cuda.memory_allocated(__UpperCamelCase ) - start_measures[str(__UpperCamelCase )]) / 2**20
lowerCAmelCase_ : str = (torch.cuda.max_memory_allocated(__UpperCamelCase ) - start_measures[str(__UpperCamelCase )]) / 2**20
return measures
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
print(f'''{description}:''' )
print(f'''- Time: {measures["time"]:.2f}s''' )
for i in range(torch.cuda.device_count() ):
print(f'''- GPU {i} allocated: {measures[str(__UpperCamelCase )]:.2f}MiB''' )
lowerCAmelCase_ : Optional[Any] = measures[f'''{i}-peak''']
print(f'''- GPU {i} peak: {peak:.2f}MiB''' )
print(f'''- CPU RAM allocated: {measures["cpu"]:.2f}MiB''' )
print(f'''- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB''' )
| 241 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
# TODO Update this
lowercase__ = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class __lowerCamelCase ( A__ ):
'''simple docstring'''
a_ : str = """esm"""
def __init__( self : Union[str, Any] , a_ : int=None , a_ : List[str]=None , a_ : Optional[int]=None , a_ : Optional[int]=7_68 , a_ : List[Any]=12 , a_ : List[str]=12 , a_ : Optional[Any]=30_72 , a_ : Optional[Any]=0.1 , a_ : Tuple=0.1 , a_ : Union[str, Any]=10_26 , a_ : List[str]=0.02 , a_ : Optional[int]=1e-1_2 , a_ : int="absolute" , a_ : Union[str, Any]=True , a_ : int=None , a_ : int=False , a_ : Optional[Any]=False , a_ : Any=None , a_ : List[str]=None , **a_ : int , ):
super().__init__(pad_token_id=a_ , mask_token_id=a_ , **a_ )
lowerCAmelCase_ : str = vocab_size
lowerCAmelCase_ : List[str] = hidden_size
lowerCAmelCase_ : str = num_hidden_layers
lowerCAmelCase_ : str = num_attention_heads
lowerCAmelCase_ : Tuple = intermediate_size
lowerCAmelCase_ : List[str] = hidden_dropout_prob
lowerCAmelCase_ : Dict = attention_probs_dropout_prob
lowerCAmelCase_ : Dict = max_position_embeddings
lowerCAmelCase_ : Optional[Any] = initializer_range
lowerCAmelCase_ : Tuple = layer_norm_eps
lowerCAmelCase_ : Dict = position_embedding_type
lowerCAmelCase_ : str = use_cache
lowerCAmelCase_ : str = emb_layer_norm_before
lowerCAmelCase_ : Any = token_dropout
lowerCAmelCase_ : str = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
lowerCAmelCase_ : int = EsmFoldConfig()
elif isinstance(a_ , a_ ):
lowerCAmelCase_ : int = EsmFoldConfig(**a_ )
lowerCAmelCase_ : Tuple = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
lowerCAmelCase_ : Any = get_default_vocab_list()
else:
lowerCAmelCase_ : Optional[Any] = vocab_list
else:
lowerCAmelCase_ : List[str] = None
lowerCAmelCase_ : Tuple = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , a_ ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def lowerCamelCase ( self : Dict ):
lowerCAmelCase_ : List[str] = super().to_dict()
if isinstance(self.esmfold_config , a_ ):
lowerCAmelCase_ : int = self.esmfold_config.to_dict()
return output
@dataclass
class __lowerCamelCase :
'''simple docstring'''
a_ : str = None
a_ : bool = True
a_ : bool = False
a_ : bool = False
a_ : bool = False
a_ : float = 0
a_ : bool = True
a_ : bool = False
a_ : int = 128
a_ : "TrunkConfig" = None
def lowerCamelCase ( self : str ):
if self.trunk is None:
lowerCAmelCase_ : List[Any] = TrunkConfig()
elif isinstance(self.trunk , a_ ):
lowerCAmelCase_ : str = TrunkConfig(**self.trunk )
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ : Any = asdict(self )
lowerCAmelCase_ : int = self.trunk.to_dict()
return output
@dataclass
class __lowerCamelCase :
'''simple docstring'''
a_ : int = 48
a_ : int = 1024
a_ : int = 128
a_ : int = 32
a_ : int = 32
a_ : int = 32
a_ : float = 0
a_ : float = 0
a_ : bool = False
a_ : int = 4
a_ : Optional[int] = 128
a_ : "StructureModuleConfig" = None
def lowerCamelCase ( self : Optional[Any] ):
if self.structure_module is None:
lowerCAmelCase_ : Any = StructureModuleConfig()
elif isinstance(self.structure_module , a_ ):
lowerCAmelCase_ : Union[str, Any] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
"`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
f''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
"`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
f''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' )
lowerCAmelCase_ : List[str] = self.sequence_state_dim // self.sequence_head_width
lowerCAmelCase_ : str = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : Union[str, Any] = asdict(self )
lowerCAmelCase_ : str = self.structure_module.to_dict()
return output
@dataclass
class __lowerCamelCase :
'''simple docstring'''
a_ : int = 384
a_ : int = 128
a_ : int = 16
a_ : int = 128
a_ : int = 12
a_ : int = 4
a_ : int = 8
a_ : float = 0.1
a_ : int = 8
a_ : int = 1
a_ : int = 2
a_ : int = 7
a_ : int = 10
a_ : float = 1E-8
a_ : float = 1E5
def lowerCamelCase ( self : Optional[int] ):
return asdict(self )
def __lowerCamelCase ( ) -> Tuple:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 241 | 1 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> bool:
lowercase__: int = int(number**0.5 )
return number == sq * sq
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> tuple[int, int]:
lowercase__: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
lowercase__: int = x_den * y_den * z_den
lowercase__: int = gcd(__UpperCAmelCase , __UpperCAmelCase )
top //= hcf
bottom //= hcf
return top, bottom
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 3_5 ) -> int:
lowercase__: set = set()
lowercase__: int
lowercase__: Fraction = Fraction(0 )
lowercase__: tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
lowercase__: Union[str, Any] = x_num * y_den + x_den * y_num
lowercase__: str = x_den * y_den
lowercase__: Dict = gcd(__UpperCAmelCase , __UpperCAmelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase__: List[str] = add_three(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
unique_s.add(__UpperCAmelCase )
# n=2
lowercase__: Union[str, Any] = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
lowercase__: Optional[int] = x_den * x_den * y_den * y_den
if is_sq(__UpperCAmelCase ) and is_sq(__UpperCAmelCase ):
lowercase__: List[Any] = int(sqrt(__UpperCAmelCase ) )
lowercase__: int = int(sqrt(__UpperCAmelCase ) )
lowercase__: int = gcd(__UpperCAmelCase , __UpperCAmelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase__: Optional[int] = add_three(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
unique_s.add(__UpperCAmelCase )
# n=-1
lowercase__: Optional[Any] = x_num * y_num
lowercase__: Any = x_den * y_num + x_num * y_den
lowercase__: Tuple = gcd(__UpperCAmelCase , __UpperCAmelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase__: int = add_three(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
unique_s.add(__UpperCAmelCase )
# n=2
lowercase__: List[str] = x_num * x_num * y_num * y_num
lowercase__: Union[str, Any] = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(__UpperCAmelCase ) and is_sq(__UpperCAmelCase ):
lowercase__: Union[str, Any] = int(sqrt(__UpperCAmelCase ) )
lowercase__: Tuple = int(sqrt(__UpperCAmelCase ) )
lowercase__: Tuple = gcd(__UpperCAmelCase , __UpperCAmelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase__: List[Any] = add_three(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
unique_s.add(__UpperCAmelCase )
for num, den in unique_s:
total += Fraction(__UpperCAmelCase , __UpperCAmelCase )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
| 2 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[Any] = "ctrl"
_UpperCAmelCase :int = ["past_key_values"]
_UpperCAmelCase :Dict = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , _UpperCAmelCase=246534 , _UpperCAmelCase=256 , _UpperCAmelCase=1280 , _UpperCAmelCase=8192 , _UpperCAmelCase=48 , _UpperCAmelCase=16 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , **_UpperCAmelCase , ):
lowercase__: Union[str, Any] = vocab_size
lowercase__: Optional[int] = n_positions
lowercase__: Optional[int] = n_embd
lowercase__: Any = n_layer
lowercase__: Any = n_head
lowercase__: int = dff
lowercase__: Dict = resid_pdrop
lowercase__: Any = embd_pdrop
lowercase__: Any = layer_norm_epsilon
lowercase__: Optional[int] = initializer_range
lowercase__: Dict = use_cache
super().__init__(**_UpperCAmelCase )
| 2 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = None
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 2
@register_to_config
def __init__( self, __a = 0.02, __a = 100, __a = 1.007, __a = 80, __a = 0.05, __a = 50, ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = sigma_max
# setable values
_lowerCAmelCase : int = None
_lowerCAmelCase : np.IntTensor = None
_lowerCAmelCase : torch.FloatTensor = None # sigma(t_i)
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
return sample
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = num_inference_steps
_lowerCAmelCase : Optional[Any] = np.arange(0, self.num_inference_steps)[::-1].copy()
_lowerCAmelCase : Tuple = torch.from_numpy(__a).to(__a)
_lowerCAmelCase : Any = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
_lowerCAmelCase : int = torch.tensor(__a, dtype=torch.floataa, device=__a)
def snake_case__ ( self, __a, __a, __a = None):
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
_lowerCAmelCase : Any = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
else:
_lowerCAmelCase : str = 0
# sample eps ~ N(0, S_noise^2 * I)
_lowerCAmelCase : Any = self.config.s_noise * randn_tensor(sample.shape, generator=__a).to(sample.device)
_lowerCAmelCase : Optional[Any] = sigma + gamma * sigma
_lowerCAmelCase : Any = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def snake_case__ ( self, __a, __a, __a, __a, __a = True, ):
'''simple docstring'''
_lowerCAmelCase : Dict = sample_hat + sigma_hat * model_output
_lowerCAmelCase : int = (sample_hat - pred_original_sample) / sigma_hat
_lowerCAmelCase : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=__a, derivative=__a, pred_original_sample=__a)
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a = True, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = sample_prev + sigma_prev * model_output
_lowerCAmelCase : Optional[int] = (sample_prev - pred_original_sample) / sigma_prev
_lowerCAmelCase : Tuple = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=__a, derivative=__a, pred_original_sample=__a)
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
raise NotImplementedError()
| 36 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class _snake_case :
@staticmethod
def SCREAMING_SNAKE_CASE ( *_a , **_a ):
pass
def lowerCAmelCase_ ( _snake_case : Image ) -> str:
'''simple docstring'''
__magic_name__ : Optional[int] = hashlib.mda(image.tobytes() )
return m.hexdigest()[:10]
def lowerCAmelCase_ ( _snake_case : Image ) -> Dict:
'''simple docstring'''
__magic_name__ : List[Any] = np.array(_snake_case )
__magic_name__ : Optional[int] = npimg.shape
return {"hash": hashimage(_snake_case ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
UpperCamelCase__ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
UpperCamelCase__ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ):
__magic_name__ : Dict = MaskGenerationPipeline(model=_a , image_processor=_a )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def SCREAMING_SNAKE_CASE ( self , _a , _a ):
pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
def SCREAMING_SNAKE_CASE ( self ):
pass
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Dict = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
__magic_name__ : str = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=256 )
# Shortening by hashing
__magic_name__ : Dict = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(_a ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_21},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
{"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.99_67},
{"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_93},
{"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.99_09},
{"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.98_79},
{"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.98_34},
{"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.97_16},
{"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.96_12},
{"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.95_99},
{"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.95_52},
{"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.95_32},
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.95_16},
{"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.94_99},
{"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.94_83},
{"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.94_64},
{"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_43},
{"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_43},
{"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.94_08},
{"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.93_35},
{"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.93_26},
{"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.92_62},
{"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.89_99},
{"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.89_86},
{"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.89_84},
{"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.88_73},
{"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.88_71}
] , )
# fmt: on
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : str = "facebook/sam-vit-huge"
__magic_name__ : str = pipeline("mask-generation" , model=_a )
__magic_name__ : Tuple = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
__magic_name__ : Any = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(_a ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.02_10},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
] , )
| 281 | 0 |
def snake_case (UpperCAmelCase__ ) -> bool:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
UpperCamelCase_: Optional[Any] = F'''Input value of [number={number}] must be an integer'''
raise TypeError(UpperCAmelCase__ )
if number < 0:
return False
UpperCamelCase_: Any = number * number
while number > 0:
if number % 1_0 != number_square % 1_0:
return False
number //= 1_0
number_square //= 1_0
return True
if __name__ == "__main__":
import doctest
doctest.testmod() | 370 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
A_ : List[str] = '.'
if __name__ == "__main__":
A_ : Dict = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
A_ : Dict = []
A_ : Optional[Any] = []
with open(doctest_file_path) as fp:
for line in fp:
A_ : Tuple = line.strip()
A_ : Any = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
A_ : str = '\n'.join(non_existent_paths)
raise ValueError(F'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
if all_paths != sorted(all_paths):
raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.') | 292 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class lowercase__:
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_3 , SCREAMING_SNAKE_CASE_ : Dict=1_0 , SCREAMING_SNAKE_CASE_ : List[str]=3 , SCREAMING_SNAKE_CASE_ : List[Any]=2 , SCREAMING_SNAKE_CASE_ : Optional[int]=2 , SCREAMING_SNAKE_CASE_ : Dict=2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : List[Any]=3_2 , SCREAMING_SNAKE_CASE_ : Optional[int]=5 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE_ : List[Any]=3_7 , SCREAMING_SNAKE_CASE_ : Dict="gelu" , SCREAMING_SNAKE_CASE_ : str=0.1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE_ : str=1_0 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.02 , SCREAMING_SNAKE_CASE_ : Any=0.9 , SCREAMING_SNAKE_CASE_ : List[str]=None , ) -> Optional[Any]:
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = num_channels
lowercase_ = patch_size
lowercase_ = tubelet_size
lowercase_ = num_frames
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = mask_ratio
lowercase_ = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowercase_ = (image_size // patch_size) ** 2
lowercase_ = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowercase_ = int(mask_ratio * self.seq_length )
def _lowercase ( self : List[Any] ) -> List[Any]:
lowercase_ = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : List[str] ) -> Optional[Any]:
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
lowercase_ = VideoMAEModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[int]:
lowercase_ = VideoMAEForPreTraining(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase_ = torch.ones((self.num_masks,) )
lowercase_ = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowercase_ = mask.expand(self.batch_size , -1 ).bool()
lowercase_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# model only returns predictions for masked patches
lowercase_ = mask.sum().item()
lowercase_ = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
lowercase_ = self.prepare_config_and_inputs()
lowercase_ = config_and_inputs
lowercase_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase__( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
a :Optional[int] = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
a :Any = (
{'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
a :int = False
a :str = False
a :List[str] = False
a :List[Any] = False
def _lowercase ( self : Tuple ) -> Optional[Any]:
lowercase_ = VideoMAEModelTester(self )
lowercase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=3_7 )
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any]=False ) -> int:
lowercase_ = copy.deepcopy(SCREAMING_SNAKE_CASE_ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase_ = torch.ones((self.model_tester.num_masks,) )
lowercase_ = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowercase_ = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowercase_ = bool_masked_pos.to(SCREAMING_SNAKE_CASE_ )
if return_labels:
if model_class in [
*get_values(SCREAMING_SNAKE_CASE_ ),
]:
lowercase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
return inputs_dict
def _lowercase ( self : Dict ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def _lowercase ( self : Union[str, Any] ) -> int:
pass
def _lowercase ( self : List[str] ) -> Dict:
lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def _lowercase ( self : int ) -> Dict:
lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(SCREAMING_SNAKE_CASE_ )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Dict ) -> Tuple:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : str ) -> List[str]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE_ )
@slow
def _lowercase ( self : List[Any] ) -> int:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = VideoMAEModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[Any] ) -> Optional[int]:
if not self.has_attentions:
pass
else:
lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = True
for model_class in self.all_model_classes:
lowercase_ = self.model_tester.seq_length - self.model_tester.num_masks
lowercase_ = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowercase_ = True
lowercase_ = False
lowercase_ = True
lowercase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowercase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowercase_ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase_ = True
lowercase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowercase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowercase_ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase_ = len(SCREAMING_SNAKE_CASE_ )
# Check attention is always last and order is fine
lowercase_ = True
lowercase_ = True
lowercase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowercase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(out_len + 1 , len(SCREAMING_SNAKE_CASE_ ) )
lowercase_ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _lowercase ( self : List[Any] ) -> Any:
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any ):
lowercase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowercase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowercase_ = outputs.hidden_states
lowercase_ = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
lowercase_ = self.model_tester.seq_length - self.model_tester.num_masks
lowercase_ = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowercase ( self : int ) -> str:
pass
def a ( ):
'''simple docstring'''
lowercase_ = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
lowercase_ = np.load(_lowercase )
return list(_lowercase )
@require_torch
@require_vision
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : str ) -> List[str]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
lowercase_ = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
SCREAMING_SNAKE_CASE_ )
lowercase_ = self.default_image_processor
lowercase_ = prepare_video()
lowercase_ = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
lowercase_ = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
lowercase_ = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowercase_ = torch.tensor([0.36_69, -0.06_88, -0.24_21] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
@slow
def _lowercase ( self : List[Any] ) -> Union[str, Any]:
lowercase_ = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.default_image_processor
lowercase_ = prepare_video()
lowercase_ = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
# add boolean mask, indicating which patches to mask
lowercase_ = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
lowercase_ = torch.load(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
lowercase_ = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
lowercase_ = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowercase_ = torch.tensor(
[[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] , device=SCREAMING_SNAKE_CASE_ )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowercase_ = torch.tensor([0.51_42] , device=SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.loss , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowercase_ = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=SCREAMING_SNAKE_CASE_ ).to(
SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
lowercase_ = model(**SCREAMING_SNAKE_CASE_ )
lowercase_ = torch.tensor(torch.tensor([0.64_69] ) , device=SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.loss , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 30 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(__magic_name__ )
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , **A ) -> List[str]:
super().__init__(**A )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , A , **A ) -> Optional[Any]:
return super().__call__(A , **A )
def _lowercase( self , **A ) -> Optional[Any]:
UpperCAmelCase : List[Any] = {}
if "candidate_labels" in kwargs:
UpperCAmelCase : Dict = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
UpperCAmelCase : Optional[Any] = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def _lowercase( self , A , A=None , A="This is a photo of {}." ) -> Optional[Any]:
UpperCAmelCase : int = load_image(A )
UpperCAmelCase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
UpperCAmelCase : List[str] = candidate_labels
UpperCAmelCase : Tuple = [hypothesis_template.format(A ) for x in candidate_labels]
UpperCAmelCase : Union[str, Any] = self.tokenizer(A , return_tensors=self.framework , padding=A )
UpperCAmelCase : Union[str, Any] = [text_inputs]
return inputs
def _lowercase( self , A ) -> Optional[int]:
UpperCAmelCase : List[Any] = model_inputs.pop("""candidate_labels""" )
UpperCAmelCase : Optional[Any] = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , A ):
UpperCAmelCase : Optional[Any] = text_inputs[0]
else:
# Batching case.
UpperCAmelCase : Any = text_inputs[0][0]
UpperCAmelCase : Dict = self.model(**A , **A )
UpperCAmelCase : List[Any] = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
def _lowercase( self , A ) -> Union[str, Any]:
UpperCAmelCase : int = model_outputs.pop("""candidate_labels""" )
UpperCAmelCase : int = model_outputs["""logits"""][0]
if self.framework == "pt":
UpperCAmelCase : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 )
UpperCAmelCase : Any = probs.tolist()
if not isinstance(A , A ):
UpperCAmelCase : Any = [scores]
elif self.framework == "tf":
UpperCAmelCase : List[str] = stable_softmax(A , axis=-1 )
UpperCAmelCase : Union[str, Any] = probs.numpy().tolist()
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
UpperCAmelCase : Any = [
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(A , A ) , key=lambda A : -x[0] )
]
return result
| 265 | 0 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class UpperCamelCase__( __A , __A , __A , unittest.TestCase ):
lowerCAmelCase__ : str = StableUnCLIPPipeline
lowerCAmelCase__ : Union[str, Any] = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase__ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase__ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
lowerCAmelCase__ : Optional[Any] = False
def snake_case__ ( self ) -> List[Any]:
A__ = 32
A__ = embedder_hidden_size
# prior components
torch.manual_seed(0 )
A__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
A__ = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=__UpperCAmelCase ,projection_dim=__UpperCAmelCase ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,) )
torch.manual_seed(0 )
A__ = PriorTransformer(
num_attention_heads=2 ,attention_head_dim=12 ,embedding_dim=__UpperCAmelCase ,num_layers=1 ,)
torch.manual_seed(0 )
A__ = DDPMScheduler(
variance_type='fixed_small_log' ,prediction_type='sample' ,num_train_timesteps=10_00 ,clip_sample=__UpperCAmelCase ,clip_sample_range=5.0 ,beta_schedule='squaredcos_cap_v2' ,)
# regular denoising components
torch.manual_seed(0 )
A__ = StableUnCLIPImageNormalizer(embedding_dim=__UpperCAmelCase )
A__ = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
A__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
A__ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=__UpperCAmelCase ,projection_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,) )
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') ,up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') ,block_out_channels=(32, 64) ,attention_head_dim=(2, 4) ,class_embed_type='projection' ,projection_class_embeddings_input_dim=embedder_projection_dim * 2 ,cross_attention_dim=__UpperCAmelCase ,layers_per_block=1 ,upcast_attention=__UpperCAmelCase ,use_linear_projection=__UpperCAmelCase ,)
torch.manual_seed(0 )
A__ = DDIMScheduler(
beta_schedule='scaled_linear' ,beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,prediction_type='v_prediction' ,set_alpha_to_one=__UpperCAmelCase ,steps_offset=1 ,)
torch.manual_seed(0 )
A__ = AutoencoderKL()
A__ = {
# prior components
'prior_tokenizer': prior_tokenizer,
'prior_text_encoder': prior_text_encoder,
'prior': prior,
'prior_scheduler': prior_scheduler,
# image noising components
'image_normalizer': image_normalizer,
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder,
'unet': unet,
'scheduler': scheduler,
'vae': vae,
}
return components
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase=0 ) -> str:
if str(__UpperCAmelCase ).startswith('mps' ):
A__ = torch.manual_seed(__UpperCAmelCase )
else:
A__ = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
A__ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'prior_num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def snake_case__ ( self ) -> List[Any]:
A__ = torch_device == 'cpu'
self._test_attention_slicing_forward_pass(test_max_difference=__UpperCAmelCase )
def snake_case__ ( self ) -> int:
A__ = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=__UpperCAmelCase )
@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
def snake_case__ ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self ) -> List[Any]:
A__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )
A__ = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' ,torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A__ = torch.Generator(device='cpu' ).manual_seed(0 )
A__ = pipe('anime turle' ,generator=__UpperCAmelCase ,output_type='np' )
A__ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__UpperCAmelCase ,__UpperCAmelCase )
def snake_case__ ( self ) -> List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A__ = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' ,torch_dtype=torch.floataa )
A__ = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A__ = pipe(
'anime turtle' ,prior_num_inference_steps=2 ,num_inference_steps=2 ,output_type='np' ,)
A__ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 154 | """simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class UpperCamelCase__( __A , __A , __A , unittest.TestCase ):
lowerCAmelCase__ : str = StableUnCLIPPipeline
lowerCAmelCase__ : Union[str, Any] = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase__ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase__ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
lowerCAmelCase__ : Optional[Any] = False
def snake_case__ ( self ) -> List[Any]:
A__ = 32
A__ = embedder_hidden_size
# prior components
torch.manual_seed(0 )
A__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
A__ = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=__UpperCAmelCase ,projection_dim=__UpperCAmelCase ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,) )
torch.manual_seed(0 )
A__ = PriorTransformer(
num_attention_heads=2 ,attention_head_dim=12 ,embedding_dim=__UpperCAmelCase ,num_layers=1 ,)
torch.manual_seed(0 )
A__ = DDPMScheduler(
variance_type='fixed_small_log' ,prediction_type='sample' ,num_train_timesteps=10_00 ,clip_sample=__UpperCAmelCase ,clip_sample_range=5.0 ,beta_schedule='squaredcos_cap_v2' ,)
# regular denoising components
torch.manual_seed(0 )
A__ = StableUnCLIPImageNormalizer(embedding_dim=__UpperCAmelCase )
A__ = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
A__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
A__ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=__UpperCAmelCase ,projection_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,) )
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') ,up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') ,block_out_channels=(32, 64) ,attention_head_dim=(2, 4) ,class_embed_type='projection' ,projection_class_embeddings_input_dim=embedder_projection_dim * 2 ,cross_attention_dim=__UpperCAmelCase ,layers_per_block=1 ,upcast_attention=__UpperCAmelCase ,use_linear_projection=__UpperCAmelCase ,)
torch.manual_seed(0 )
A__ = DDIMScheduler(
beta_schedule='scaled_linear' ,beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,prediction_type='v_prediction' ,set_alpha_to_one=__UpperCAmelCase ,steps_offset=1 ,)
torch.manual_seed(0 )
A__ = AutoencoderKL()
A__ = {
# prior components
'prior_tokenizer': prior_tokenizer,
'prior_text_encoder': prior_text_encoder,
'prior': prior,
'prior_scheduler': prior_scheduler,
# image noising components
'image_normalizer': image_normalizer,
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder,
'unet': unet,
'scheduler': scheduler,
'vae': vae,
}
return components
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase=0 ) -> str:
if str(__UpperCAmelCase ).startswith('mps' ):
A__ = torch.manual_seed(__UpperCAmelCase )
else:
A__ = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
A__ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'prior_num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def snake_case__ ( self ) -> List[Any]:
A__ = torch_device == 'cpu'
self._test_attention_slicing_forward_pass(test_max_difference=__UpperCAmelCase )
def snake_case__ ( self ) -> int:
A__ = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=__UpperCAmelCase )
@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
def snake_case__ ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self ) -> List[Any]:
A__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )
A__ = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' ,torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A__ = torch.Generator(device='cpu' ).manual_seed(0 )
A__ = pipe('anime turle' ,generator=__UpperCAmelCase ,output_type='np' )
A__ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__UpperCAmelCase ,__UpperCAmelCase )
def snake_case__ ( self ) -> List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A__ = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' ,torch_dtype=torch.floataa )
A__ = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A__ = pipe(
'anime turtle' ,prior_num_inference_steps=2 ,num_inference_steps=2 ,output_type='np' ,)
A__ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 154 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase : List[str] = """▁"""
UpperCAmelCase : Union[str, Any] = {"""vocab_file""": """sentencepiece.bpe.model"""}
UpperCAmelCase : Tuple = {
"""vocab_file""": {
"""facebook/mbart-large-50-one-to-many-mmt""": (
"""https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
),
}
}
UpperCAmelCase : str = {
"""facebook/mbart-large-50-one-to-many-mmt""": 1024,
}
# fmt: off
UpperCAmelCase : Tuple = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]
class __lowerCAmelCase ( UpperCamelCase__):
_lowercase : Union[str, Any] = VOCAB_FILES_NAMES
_lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : List[str] = PRETRAINED_VOCAB_FILES_MAP
_lowercase : int = ["""input_ids""", """attention_mask"""]
_lowercase : List[int] = []
_lowercase : List[int] = []
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> None:
'''simple docstring'''
a__ : Any =AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
a__ : str ={} if sp_model_kwargs is None else sp_model_kwargs
a__ : Dict =kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
a__ : Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase__ ) )
a__ : str =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
a__ : Optional[int] ={"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
a__ : Dict =1
a__ : List[Any] =len(self.sp_model )
a__ : List[str] ={
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase__ )
}
a__ : Optional[int] ={v: k for k, v in self.lang_code_to_id.items()}
a__ : Any =len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
a__ : Tuple ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
a__ : Optional[int] =src_lang if src_lang is not None else "en_XX"
a__ : int =self.lang_code_to_id[self._src_lang]
a__ : Dict =tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _lowercase ( self ) -> int:
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _lowercase ( self ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def _lowercase ( self , lowerCAmelCase__ ) -> None:
'''simple docstring'''
a__ : Tuple =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Dict:
'''simple docstring'''
a__ : Tuple =self.__dict__.copy()
a__ : List[Any] =None
return state
def __setstate__( self , lowerCAmelCase__ ) -> None:
'''simple docstring'''
a__ : Union[str, Any] =d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a__ : Optional[int] ={}
a__ : str =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
a__ : Union[str, Any] ={self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowercase ( self , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
def _lowercase ( self , lowerCAmelCase__ ) -> int:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a__ : Union[str, Any] =self.sp_model.PieceToId(lowerCAmelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _lowercase ( self , lowerCAmelCase__ ) -> str:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _lowercase ( self , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
a__ : str =[]
a__ : Tuple =""
a__ : str =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase__ ) + token
a__ : Optional[int] =True
a__ : Dict =[]
else:
current_sub_tokens.append(lowerCAmelCase__ )
a__ : Optional[Any] =False
out_string += self.sp_model.decode(lowerCAmelCase__ )
return out_string.strip()
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
a__ : List[str] =os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__ , "wb" ) as fi:
a__ : Optional[int] =self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (out_vocab_file,)
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
a__ : List[str] =[1] * len(self.prefix_tokens )
a__ : List[str] =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase__ )) + ([0] * len(lowerCAmelCase__ )) + suffix_ones
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
a__ : Any =src_lang
a__ : Optional[int] =self(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
a__ : Any =self.convert_tokens_to_ids(lowerCAmelCase__ )
a__ : int =tgt_lang_id
return inputs
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = "en_XX" , lowerCAmelCase__ = None , lowerCAmelCase__ = "ro_RO" , **lowerCAmelCase__ , ) -> BatchEncoding:
'''simple docstring'''
a__ : Optional[Any] =src_lang
a__ : Optional[int] =tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def _lowercase ( self ) -> str:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowercase ( self , lowerCAmelCase__ ) -> None:
'''simple docstring'''
a__ : Tuple =self.lang_code_to_id[src_lang]
a__ : Optional[Any] =[self.cur_lang_code_id]
a__ : Optional[int] =[self.eos_token_id]
def _lowercase ( self , lowerCAmelCase__ ) -> None:
'''simple docstring'''
a__ : Tuple =self.lang_code_to_id[tgt_lang]
a__ : List[Any] =[self.cur_lang_code_id]
a__ : str =[self.eos_token_id]
| 95 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
class a__ ( __A ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = ['pixel_values']
def __init__(self , __lowercase = True , __lowercase = None , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = 1 / 2_55 , __lowercase = True , __lowercase = None , __lowercase = None , **__lowercase , ):
super().__init__(**__lowercase )
__lowerCAmelCase = size if size is not None else {'''shortest_edge''': 3_84}
__lowerCAmelCase = get_size_dict(__lowercase , default_to_square=__lowercase )
__lowerCAmelCase = do_resize
__lowerCAmelCase = size
# Default value set here for backwards compatibility where the value in config is None
__lowerCAmelCase = crop_pct if crop_pct is not None else 2_24 / 2_56
__lowerCAmelCase = resample
__lowerCAmelCase = do_rescale
__lowerCAmelCase = rescale_factor
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase = PILImageResampling.BICUBIC , __lowercase = None , **__lowercase , ):
__lowerCAmelCase = get_size_dict(__lowercase , default_to_square=__lowercase )
if "shortest_edge" not in size:
raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
__lowerCAmelCase = size['''shortest_edge''']
if shortest_edge < 3_84:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
__lowerCAmelCase = int(shortest_edge / crop_pct )
__lowerCAmelCase = get_resize_output_image_size(__lowercase , size=__lowercase , default_to_square=__lowercase )
__lowerCAmelCase = resize(image=__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__lowercase , size=(shortest_edge, shortest_edge) , data_format=__lowercase , **__lowercase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__lowercase , size=(shortest_edge, shortest_edge) , resample=__lowercase , data_format=__lowercase , **__lowercase )
def _snake_case (self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ):
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase , ):
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ):
__lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase = crop_pct if crop_pct is not None else self.crop_pct
__lowerCAmelCase = resample if resample is not None else self.resample
__lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
__lowerCAmelCase = image_std if image_std is not None else self.image_std
__lowerCAmelCase = size if size is not None else self.size
__lowerCAmelCase = get_size_dict(__lowercase , default_to_square=__lowercase )
__lowerCAmelCase = make_list_of_images(__lowercase )
if not valid_images(__lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_resize and size["shortest_edge"] < 3_84 and crop_pct is None:
raise ValueError('''crop_pct must be specified if size < 384.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__lowerCAmelCase = [to_numpy_array(__lowercase ) for image in images]
if do_resize:
__lowerCAmelCase = [self.resize(image=__lowercase , size=__lowercase , crop_pct=__lowercase , resample=__lowercase ) for image in images]
if do_rescale:
__lowerCAmelCase = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
if do_normalize:
__lowerCAmelCase = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
__lowerCAmelCase = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
__lowerCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=__lowercase , tensor_type=__lowercase )
| 174 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Optional[int] , *UpperCamelCase__ : Any , **UpperCamelCase__ : str ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Union[str, Any] , *UpperCamelCase__ : Any , **UpperCamelCase__ : Any ) -> Any:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Union[str, Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Tuple ) -> int:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : str , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : str , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Any , *UpperCamelCase__ : int , **UpperCamelCase__ : Tuple ) -> str:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : List[str] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : str ) -> str:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : str , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : str ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Tuple , *UpperCamelCase__ : str , **UpperCamelCase__ : str ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Union[str, Any] , *UpperCamelCase__ : str , **UpperCamelCase__ : Dict ) -> List[str]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : List[str] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Tuple ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Tuple , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : str ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : int , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Dict ) -> List[str]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Union[str, Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : List[str] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Optional[Any] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Any ) -> List[str]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : str , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : List[str] ) -> Dict:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : int , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Tuple ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : int , *UpperCamelCase__ : Dict , **UpperCamelCase__ : str ) -> Dict:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : str , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Dict ) -> Dict:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : List[str] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Union[str, Any] ) -> str:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : int , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Dict ) -> Any:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Any , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Dict ) -> Dict:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : List[Any] , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : List[str] ) -> str:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Tuple , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Any ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Any , *UpperCamelCase__ : Any , **UpperCamelCase__ : int ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : List[Any] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : List[Any] , *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Any , *UpperCamelCase__ : str , **UpperCamelCase__ : List[Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Any , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Union[str, Any] ) -> Any:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Optional[int] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : str ) -> Dict:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : List[str] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : int ) -> Any:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
def _UpperCamelCase ( *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]:
'''simple docstring'''
requires_backends(__lowerCAmelCase , ['''torch'''] )
def _UpperCamelCase ( *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Tuple:
'''simple docstring'''
requires_backends(__lowerCAmelCase , ['''torch'''] )
def _UpperCamelCase ( *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]:
'''simple docstring'''
requires_backends(__lowerCAmelCase , ['''torch'''] )
def _UpperCamelCase ( *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(__lowerCAmelCase , ['''torch'''] )
def _UpperCamelCase ( *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(__lowerCAmelCase , ['''torch'''] )
def _UpperCamelCase ( *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(__lowerCAmelCase , ['''torch'''] )
def _UpperCamelCase ( *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[Any]:
'''simple docstring'''
requires_backends(__lowerCAmelCase , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : List[Any] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Dict ) -> Any:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[str] ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Optional[int] , *UpperCamelCase__ : str , **UpperCamelCase__ : int ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Dict , *UpperCamelCase__ : str , **UpperCamelCase__ : Dict ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : str , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : int ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : List[Any] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Any ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Union[str, Any] , *UpperCamelCase__ : str , **UpperCamelCase__ : str ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Any , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Optional[Any] ) -> Any:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Tuple , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Tuple ) -> str:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Any , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Dict ) -> Any:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Any , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[str] ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : str , *UpperCamelCase__ : str , **UpperCamelCase__ : str ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : List[Any] , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : str , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : int ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : List[str] , *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Tuple , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : List[str] ) -> Any:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Any , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Optional[int] ) -> int:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Tuple , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Tuple ) -> str:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Optional[Any] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : int , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Optional[int] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Any ) -> Dict:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Dict , *UpperCamelCase__ : Any , **UpperCamelCase__ : Any ) -> int:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Union[str, Any] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Tuple ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : List[str] , *UpperCamelCase__ : Any , **UpperCamelCase__ : int ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : str , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : int ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Any , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Optional[int] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : str ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Dict , *UpperCamelCase__ : Any , **UpperCamelCase__ : Optional[int] ) -> Any:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Optional[int] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Any , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Dict ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : int , *UpperCamelCase__ : Any , **UpperCamelCase__ : Dict ) -> str:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : List[Any] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Any ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Union[str, Any] , *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[int] ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : List[str] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Tuple ) -> int:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Any ) -> Dict:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : List[str] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : int ) -> int:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Optional[int] , *UpperCamelCase__ : str , **UpperCamelCase__ : Union[str, Any] ) -> Any:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Dict , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : int ) -> Any:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Dict , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : int ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Dict , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Dict , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : List[Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Tuple , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : int ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Optional[Any] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : int ) -> int:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : List[str] , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : str ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] , *UpperCamelCase__ : Any , **UpperCamelCase__ : Tuple ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Optional[int] , *UpperCamelCase__ : Any , **UpperCamelCase__ : str ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Optional[int] , *UpperCamelCase__ : Any , **UpperCamelCase__ : int ) -> int:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : str , *UpperCamelCase__ : str , **UpperCamelCase__ : Any ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : List[Any] , *UpperCamelCase__ : int , **UpperCamelCase__ : List[Any] ) -> str:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Any , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : List[str] ) -> Dict:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Tuple , *UpperCamelCase__ : str , **UpperCamelCase__ : Tuple ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Optional[Any] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any] ) -> Any:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : List[Any] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Optional[int] ) -> int:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Tuple , *UpperCamelCase__ : Any , **UpperCamelCase__ : List[str] ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Union[str, Any] , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Dict ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Any , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : int , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : str ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : List[Any] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Optional[int] ) -> List[Any]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Tuple , *UpperCamelCase__ : Any , **UpperCamelCase__ : Union[str, Any] ) -> int:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : str , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : int ) -> str:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Union[str, Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Union[str, Any] ) -> int:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Union[str, Any] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Optional[Any] ) -> Any:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Optional[int] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : List[str] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : List[Any] ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Tuple , *UpperCamelCase__ : str , **UpperCamelCase__ : int ) -> Any:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : List[str] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : List[str] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Union[str, Any] ) -> Dict:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : List[Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[Any] ) -> int:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] , *UpperCamelCase__ : int , **UpperCamelCase__ : str ) -> Dict:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Dict , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : int ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : str , *UpperCamelCase__ : Any , **UpperCamelCase__ : List[Any] ) -> str:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[Any] ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Optional[Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[Any] ) -> Any:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Any , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : List[Any] ) -> str:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Any , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Any ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Optional[int] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : List[Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Any , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Dict , *UpperCamelCase__ : str , **UpperCamelCase__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : List[Any] , *UpperCamelCase__ : str , **UpperCamelCase__ : Any ) -> List[Any]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : List[str] , *UpperCamelCase__ : str , **UpperCamelCase__ : Tuple ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Any , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Union[str, Any] ) -> int:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : List[Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Optional[Any] ) -> Any:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Union[str, Any] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Union[str, Any] ) -> int:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Optional[int] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[int] ) -> int:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Tuple , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Dict ) -> Dict:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Dict , *UpperCamelCase__ : str , **UpperCamelCase__ : Optional[Any] ) -> int:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : str , *UpperCamelCase__ : Any , **UpperCamelCase__ : Optional[int] ) -> str:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : List[str] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Tuple ) -> Any:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] , *UpperCamelCase__ : str , **UpperCamelCase__ : int ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Any , *UpperCamelCase__ : Any , **UpperCamelCase__ : Any ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : List[str] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : List[Any] ) -> List[str]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : List[str] , *UpperCamelCase__ : str , **UpperCamelCase__ : List[Any] ) -> Any:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Dict , *UpperCamelCase__ : str , **UpperCamelCase__ : List[str] ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Optional[int] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Dict ) -> List[Any]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Union[str, Any] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Optional[int] ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[str] ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : str , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : List[Any] ) -> str:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Union[str, Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : int , *UpperCamelCase__ : int , **UpperCamelCase__ : Dict ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Optional[Any] , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : str ) -> int:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : int , *UpperCamelCase__ : Any , **UpperCamelCase__ : str ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : int , *UpperCamelCase__ : str , **UpperCamelCase__ : int ) -> str:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Optional[int] , *UpperCamelCase__ : Any , **UpperCamelCase__ : Optional[Any] ) -> Dict:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Tuple , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Any , *UpperCamelCase__ : Any , **UpperCamelCase__ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : List[str] , *UpperCamelCase__ : Any , **UpperCamelCase__ : Any ) -> str:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : List[Any] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Any ) -> str:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : str , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Optional[int] ) -> Any:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : List[Any] , *UpperCamelCase__ : int , **UpperCamelCase__ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : List[str] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : int ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Optional[int] , *UpperCamelCase__ : int , **UpperCamelCase__ : str ) -> int:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : Optional[int] , *UpperCamelCase__ : int , **UpperCamelCase__ : List[Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : List[str] , *UpperCamelCase__ : str , **UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : List[str] , *UpperCamelCase__ : Any , **UpperCamelCase__ : str ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
class snake_case__ ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
lowerCamelCase = ["""torch"""]
def __init__( self : str , *UpperCamelCase__ : str , **UpperCamelCase__ : int ) -> List[str]:
"""simple docstring"""
requires_backends(self , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Tuple , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
@classmethod
def lowerCAmelCase ( cls : Optional[int] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : int ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch'''] )
| 362 |
'''simple docstring'''
import os
def _UpperCamelCase ( ) -> List[Any]:
'''simple docstring'''
snake_case : int = os.path.join(os.path.dirname(SCREAMING_SNAKE_CASE__ ) , '''num.txt''' )
with open(SCREAMING_SNAKE_CASE__ ) as file_hand:
return str(sum(int(SCREAMING_SNAKE_CASE__ ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 83 | 0 |
"""simple docstring"""
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if upper_limit < 0:
raise ValueError("Limit for the Catalan sequence must be ≥ 0" )
UpperCamelCase = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
UpperCamelCase = 1
if upper_limit > 0:
UpperCamelCase = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(_SCREAMING_SNAKE_CASE ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
lowerCAmelCase__ = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
| 153 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowerCamelCase ( _lowercase , unittest.TestCase ):
UpperCAmelCase_ = KandinskyVaaControlnetPipeline
UpperCAmelCase_ = ["image_embeds", "negative_image_embeds", "hint"]
UpperCAmelCase_ = ["image_embeds", "negative_image_embeds", "hint"]
UpperCAmelCase_ = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase_ = False
@property
def snake_case_ (self ) -> Tuple:
return 32
@property
def snake_case_ (self ) -> Optional[int]:
return 32
@property
def snake_case_ (self ) -> int:
return self.time_input_dim
@property
def snake_case_ (self ) -> Dict:
return self.time_input_dim * 4
@property
def snake_case_ (self ) -> List[str]:
return 1_00
@property
def snake_case_ (self ) -> Union[str, Any]:
torch.manual_seed(0 )
UpperCamelCase = {
"in_channels": 8,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCamelCase = UNetaDConditionModel(**__a )
return model
@property
def snake_case_ (self ) -> Dict:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def snake_case_ (self ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCamelCase = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case_ (self ) -> Optional[Any]:
UpperCamelCase = self.dummy_unet
UpperCamelCase = self.dummy_movq
UpperCamelCase = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=__a , set_alpha_to_one=__a , steps_offset=1 , prediction_type="epsilon" , thresholding=__a , )
UpperCamelCase = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def snake_case_ (self , __a , __a=0 ) -> Any:
UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__a ) ).to(__a )
UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__a )
# create hint
UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(__a ) ).to(__a )
if str(__a ).startswith("mps" ):
UpperCamelCase = torch.manual_seed(__a )
else:
UpperCamelCase = torch.Generator(device=__a ).manual_seed(__a )
UpperCamelCase = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def snake_case_ (self ) -> int:
UpperCamelCase = "cpu"
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = self.pipeline_class(**__a )
UpperCamelCase = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
UpperCamelCase = pipe(**self.get_dummy_inputs(__a ) )
UpperCamelCase = output.images
UpperCamelCase = pipe(
**self.get_dummy_inputs(__a ) , return_dict=__a , )[0]
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
def snake_case_ (self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ (self ) -> Dict:
UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy" )
UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png" )
UpperCamelCase = torch.from_numpy(np.array(__a ) ).float() / 255.0
UpperCamelCase = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
UpperCamelCase = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(__a )
UpperCamelCase = KandinskyVaaControlnetPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa )
UpperCamelCase = pipeline.to(__a )
pipeline.set_progress_bar_config(disable=__a )
UpperCamelCase = "A robot, 4k photo"
UpperCamelCase = torch.Generator(device="cuda" ).manual_seed(0 )
UpperCamelCase , UpperCamelCase = pipe_prior(
__a , generator=__a , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCamelCase = torch.Generator(device="cuda" ).manual_seed(0 )
UpperCamelCase = pipeline(
image_embeds=__a , negative_image_embeds=__a , hint=__a , generator=__a , num_inference_steps=1_00 , output_type="np" , )
UpperCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(__a , __a )
| 153 | 1 |
from typing import List
from .keymap import KEYMAP, get_character
def A ( lowercase ) -> Union[str, Any]:
'''simple docstring'''
def decorator(lowercase ):
UpperCamelCase = getattr(_lowerCamelCase , 'handle_key' , [] )
handle += [key]
setattr(_lowerCamelCase , 'handle_key' , _lowerCamelCase )
return func
return decorator
def A ( *lowercase ) -> Optional[int]:
'''simple docstring'''
def decorator(lowercase ):
UpperCamelCase = getattr(_lowerCamelCase , 'handle_key' , [] )
handle += keys
setattr(_lowerCamelCase , 'handle_key' , _lowerCamelCase )
return func
return decorator
class lowercase ( a__ ):
def __new__( cls , A_ , A_ , A_ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = super().__new__(cls , A_ , A_ , A_ )
if not hasattr(A_ , 'key_handler' ):
setattr(A_ , 'key_handler' , {} )
setattr(A_ , 'handle_input' , KeyHandler.handle_input )
for value in attrs.values():
UpperCamelCase = getattr(A_ , 'handle_key' , [] )
for key in handled_keys:
UpperCamelCase = value
return new_cls
@staticmethod
def __UpperCamelCase ( cls ) -> Dict:
"""simple docstring"""
UpperCamelCase = get_character()
if char != KEYMAP["undefined"]:
UpperCamelCase = ord(A_ )
UpperCamelCase = cls.key_handler.get(A_ )
if handler:
UpperCamelCase = char
return handler(cls )
else:
return None
def A ( cls ) -> Optional[Any]:
'''simple docstring'''
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 370 |
import pprint
import requests
_UpperCAmelCase : Union[str, Any] = "https://zenquotes.io/api"
def A ( ) -> list:
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def A ( ) -> list:
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
_UpperCAmelCase : str = random_quotes()
pprint.pprint(response)
| 110 | 0 |
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_lowercase : int = ['text', 'image', 'audio']
def lowercase__ ( snake_case_ :List[str] ):
__UpperCAmelCase = []
for input_type in input_types:
if input_type == "text":
inputs.append('''Text input''' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_000 ) )
elif isinstance(snake_case_ , snake_case_ ):
inputs.append(create_inputs(snake_case_ ) )
else:
raise ValueError(F'''Invalid type requested: {input_type}''' )
return inputs
def lowercase__ ( snake_case_ :List ):
__UpperCAmelCase = []
for output in outputs:
if isinstance(snake_case_ , (str, AgentText) ):
output_types.append('''text''' )
elif isinstance(snake_case_ , (Image.Image, AgentImage) ):
output_types.append('''image''' )
elif isinstance(snake_case_ , (torch.Tensor, AgentAudio) ):
output_types.append('''audio''' )
else:
raise ValueError(F'''Invalid output: {output}''' )
return output_types
@is_tool_test
class _UpperCAmelCase :
def a ( self : Optional[int] ):
self.assertTrue(hasattr(self.tool , '''inputs''' ) )
self.assertTrue(hasattr(self.tool , '''outputs''' ) )
__UpperCAmelCase = self.tool.inputs
for _input in inputs:
if isinstance(_input , _lowercase ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
__UpperCAmelCase = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def a ( self : int ):
__UpperCAmelCase = create_inputs(self.tool.inputs )
__UpperCAmelCase = self.tool(*_lowercase )
# There is a single output
if len(self.tool.outputs ) == 1:
__UpperCAmelCase = [outputs]
self.assertListEqual(output_types(_lowercase ) , self.tool.outputs )
def a ( self : Optional[int] ):
self.assertTrue(hasattr(self.tool , '''description''' ) )
self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) )
self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) )
def a ( self : Dict ):
__UpperCAmelCase = create_inputs(self.tool.inputs )
__UpperCAmelCase = self.tool(*_lowercase )
if not isinstance(_lowercase , _lowercase ):
__UpperCAmelCase = [outputs]
self.assertEqual(len(_lowercase ) , len(self.tool.outputs ) )
for output, output_type in zip(_lowercase , self.tool.outputs ):
__UpperCAmelCase = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(_lowercase , _lowercase ) )
def a ( self : List[str] ):
__UpperCAmelCase = create_inputs(self.tool.inputs )
__UpperCAmelCase = []
for _input, input_type in zip(_lowercase , self.tool.inputs ):
if isinstance(_lowercase , _lowercase ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
__UpperCAmelCase = self.tool(*_lowercase )
if not isinstance(_lowercase , _lowercase ):
__UpperCAmelCase = [outputs]
self.assertEqual(len(_lowercase ) , len(self.tool.outputs ) )
| 332 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowercase : Union[str, Any] = {
'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Union[str, Any] = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
_lowercase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 332 | 1 |
"""simple docstring"""
from typing import Any
import numpy as np
def a__ ( __SCREAMING_SNAKE_CASE ) -> bool:
return np.array_equal(__SCREAMING_SNAKE_CASE , matrix.conjugate().T )
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Any:
__lowerCAmelCase: Optional[int] = v.conjugate().T
__lowerCAmelCase: List[Any] = v_star.dot(__SCREAMING_SNAKE_CASE )
assert isinstance(__SCREAMING_SNAKE_CASE , np.ndarray )
return (v_star_dot.dot(__SCREAMING_SNAKE_CASE )) / (v_star.dot(__SCREAMING_SNAKE_CASE ))
def a__ ( ) -> None:
__lowerCAmelCase: List[str] = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
__lowerCAmelCase: int = np.array([[1], [2], [3]] )
assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"{a} is not hermitian."
print(rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase: Union[str, Any] = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"{a} is not hermitian."
assert rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 108 |
"""simple docstring"""
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
__A = datasets.logging.get_logger(__name__)
__A = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
__A = "\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"
__A = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"
__A = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
def lowercase_ ( self : Optional[int])-> Any:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/google-research/bleurt" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Value("string" , id="sequence"),
}) , codebase_urls=["https://github.com/google-research/bleurt"] , reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"] , )
def lowercase_ ( self : int , UpperCamelCase__ : Dict)-> List[str]:
'''simple docstring'''
if self.config_name == "default":
logger.warning(
"Using default BLEURT-Base checkpoint for sequence maximum length 128. "
"You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').")
__lowerCAmelCase: List[str] = "bleurt-base-128"
if self.config_name.lower() in CHECKPOINT_URLS:
__lowerCAmelCase: Optional[int] = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
__lowerCAmelCase: Tuple = self.config_name.upper()
else:
raise KeyError(
f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}")
# download the model checkpoint specified by self.config_name and set up the scorer
__lowerCAmelCase: Union[str, Any] = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
__lowerCAmelCase: Dict = score.BleurtScorer(os.path.join(UpperCamelCase__ , UpperCamelCase__))
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int)-> str:
'''simple docstring'''
__lowerCAmelCase: str = self.scorer.score(references=UpperCamelCase__ , candidates=UpperCamelCase__)
return {"scores": scores}
| 108 | 1 |
"""simple docstring"""
def _A ( ) -> list[list[int]]:
'''simple docstring'''
return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]
_a = generate_large_matrix()
_a = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def _A ( UpperCamelCase_ : list[list[int]]) -> None:
'''simple docstring'''
assert all(row == sorted(UpperCamelCase_, reverse=UpperCamelCase_) for row in grid)
assert all(list(UpperCamelCase_) == sorted(UpperCamelCase_, reverse=UpperCamelCase_) for col in zip(*UpperCamelCase_))
def _A ( UpperCamelCase_ : list[int]) -> int:
'''simple docstring'''
__lowercase = 0
__lowercase = len(UpperCamelCase_) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__lowercase = (left + right) // 2
__lowercase = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__lowercase = mid + 1
else:
__lowercase = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(UpperCamelCase_)
def _A ( UpperCamelCase_ : list[list[int]]) -> int:
'''simple docstring'''
__lowercase = 0
__lowercase = len(grid[0])
for i in range(len(UpperCamelCase_)):
__lowercase = find_negative_index(grid[i][:bound])
total += bound
return (len(UpperCamelCase_) * len(grid[0])) - total
def _A ( UpperCamelCase_ : list[list[int]]) -> int:
'''simple docstring'''
return len([number for row in grid for number in row if number < 0])
def _A ( UpperCamelCase_ : list[list[int]]) -> int:
'''simple docstring'''
__lowercase = 0
for row in grid:
for i, number in enumerate(UpperCamelCase_):
if number < 0:
total += len(UpperCamelCase_) - i
break
return total
def _A ( ) -> None:
'''simple docstring'''
from timeit import timeit
print("Running benchmarks")
__lowercase = (
"from __main__ import count_negatives_binary_search, "
"count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__lowercase = timeit(F"""{func}(grid=grid)""", setup=UpperCamelCase_, number=500)
print(F"""{func}() took {time:0.4f} seconds""")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 17 |
"""simple docstring"""
import baseaa
def _A ( UpperCamelCase_ : str) -> bytes:
'''simple docstring'''
return baseaa.baaencode(string.encode("utf-8"))
def _A ( UpperCamelCase_ : bytes) -> str:
'''simple docstring'''
return baseaa.baadecode(UpperCamelCase_).decode("utf-8")
if __name__ == "__main__":
_a = 'Hello World!'
_a = baseaa_encode(test)
print(encoded)
_a = baseaa_decode(encoded)
print(decoded)
| 17 | 1 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowerCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def snake_case_ ( snake_case , snake_case , snake_case = 1_60_00 ) -> List[Any]:
lowercase__: List[str] = int(round(sample_rate * max_length ) )
if len(snake_case ) <= sample_length:
return wav
lowercase__: Optional[int] = randint(0 , len(snake_case ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class __a :
__lowercase : Optional[str] = field(default=__UpperCamelCase , metadata={'help': 'Name of a dataset from the datasets package'} )
__lowercase : Optional[str] = field(
default=__UpperCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__lowercase : Optional[str] = field(
default=__UpperCamelCase , metadata={'help': 'A file containing the training audio paths and labels.'} )
__lowercase : Optional[str] = field(
default=__UpperCamelCase , metadata={'help': 'A file containing the validation audio paths and labels.'} )
__lowercase : str = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
__lowercase : str = field(
default='validation' , metadata={
'help': (
'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
__lowercase : str = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
__lowercase : str = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''} )
__lowercase : Optional[int] = field(
default=__UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__lowercase : Optional[int] = field(
default=__UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
__lowercase : float = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class __a :
__lowercase : str = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
__lowercase : Optional[str] = field(
default=__UpperCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__lowercase : Optional[str] = field(
default=__UpperCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'} )
__lowercase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__lowercase : Optional[str] = field(
default=__UpperCamelCase , metadata={'help': 'Name or path of preprocessor config.'} )
__lowercase : bool = field(
default=__UpperCamelCase , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'} )
__lowercase : bool = field(
default=__UpperCamelCase , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'} )
__lowercase : bool = field(
default=__UpperCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
__lowercase : Optional[bool] = field(
default=__UpperCamelCase , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
__lowercase : bool = field(
default=__UpperCamelCase , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'The argument `--freeze_feature_extractor` is deprecated and '
'will be removed in a future version. Use `--freeze_feature_encoder`'
'instead. Setting `freeze_feature_encoder==True`.' , lowerCAmelCase__ , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'The argument `--freeze_feature_extractor` is deprecated and '
'should not be used in combination with `--freeze_feature_encoder`.'
'Only make use of `--freeze_feature_encoder`.' )
def snake_case_ ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase__: str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__: Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__: int = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_audio_classification' , snake_case , snake_case )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase__: int = training_args.get_process_log_level()
logger.setLevel(snake_case )
transformers.utils.logging.set_verbosity(snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
lowercase__: Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__: str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to train from scratch.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset and prepare it for the audio classification task.
lowercase__: Optional[Any] = DatasetDict()
lowercase__: int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
lowercase__: Optional[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
'Make sure to set `--audio_column_name` to the correct audio column - one of '
f'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
'Make sure to set `--label_column_name` to the correct text column - one of '
f'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
lowercase__: Optional[Any] = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
lowercase__: str = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
lowercase__: Tuple = feature_extractor.model_input_names[0]
def train_transforms(snake_case ):
lowercase__: int = []
for audio in batch[data_args.audio_column_name]:
lowercase__: List[Any] = random_subsample(
audio['array'] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(snake_case )
lowercase__: Union[str, Any] = feature_extractor(snake_case , sampling_rate=feature_extractor.sampling_rate )
lowercase__: Dict = {model_input_name: inputs.get(snake_case )}
lowercase__: int = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(snake_case ):
lowercase__: Optional[int] = [audio['array'] for audio in batch[data_args.audio_column_name]]
lowercase__: Dict = feature_extractor(snake_case , sampling_rate=feature_extractor.sampling_rate )
lowercase__: str = {model_input_name: inputs.get(snake_case )}
lowercase__: Dict = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowercase__: int = raw_datasets['train'].features[data_args.label_column_name].names
lowercase__: Optional[int] = {}, {}
for i, label in enumerate(snake_case ):
lowercase__: Tuple = str(snake_case )
lowercase__: Optional[Any] = label
# Load the accuracy metric from the datasets package
lowercase__: int = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(snake_case ):
lowercase__: Dict = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=snake_case , references=eval_pred.label_ids )
lowercase__: str = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(snake_case ) , labelaid=snake_case , idalabel=snake_case , finetuning_task='audio-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase__: Union[str, Any] = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
lowercase__: List[Any] = (
raw_datasets['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(snake_case , output_all_columns=snake_case )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowercase__: str = (
raw_datasets['eval'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(snake_case , output_all_columns=snake_case )
# Initialize our trainer
lowercase__: Any = Trainer(
model=snake_case , args=snake_case , train_dataset=raw_datasets['train'] if training_args.do_train else None , eval_dataset=raw_datasets['eval'] if training_args.do_eval else None , compute_metrics=snake_case , tokenizer=snake_case , )
# Training
if training_args.do_train:
lowercase__: int = None
if training_args.resume_from_checkpoint is not None:
lowercase__: int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase__: Union[str, Any] = last_checkpoint
lowercase__: int = trainer.train(resume_from_checkpoint=snake_case )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase__: str = trainer.evaluate()
trainer.log_metrics('eval' , snake_case )
trainer.save_metrics('eval' , snake_case )
# Write model card and (optionally) push to hub
lowercase__: Union[str, Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'audio-classification',
'dataset': data_args.dataset_name,
'tags': ['audio-classification'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case )
else:
trainer.create_model_card(**snake_case )
if __name__ == "__main__":
main()
| 370 |
from __future__ import annotations
def snake_case_ ( snake_case , snake_case ) -> list[str]:
if nth_term == "":
return [""]
lowercase__: Tuple = int(snake_case )
lowercase__: int = int(snake_case )
lowercase__: list[str] = []
for temp in range(int(snake_case ) ):
series.append(f'1 / {pow(temp + 1 , int(snake_case ) )}' if series else '1' )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase = int(input('''Enter the last number (nth term) of the P-Series'''))
__lowerCAmelCase = int(input('''Enter the power for P-Series'''))
print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
print(p_series(nth_term, power))
| 288 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = """falcon"""
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""past_key_values"""]
def __init__( self , lowercase_=6_5024 , lowercase_=4544 , lowercase_=32 , lowercase_=71 , lowercase_=1E-5 , lowercase_=0.02 , lowercase_=True , lowercase_=0.0 , lowercase_=0.0 , lowercase_=None , lowercase_=False , lowercase_=False , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=11 , lowercase_=11 , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : str = vocab_size
# Backward compatibility with n_embed kwarg
UpperCAmelCase_ : Dict = kwargs.pop("n_embed" , lowercase_ )
UpperCAmelCase_ : Any = hidden_size if n_embed is None else n_embed
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : str = layer_norm_epsilon
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : List[str] = use_cache
UpperCAmelCase_ : List[Any] = hidden_dropout
UpperCAmelCase_ : List[Any] = attention_dropout
UpperCAmelCase_ : List[Any] = bos_token_id
UpperCAmelCase_ : Tuple = eos_token_id
UpperCAmelCase_ : List[str] = num_attention_heads if num_kv_heads is None else num_kv_heads
UpperCAmelCase_ : Any = alibi
UpperCAmelCase_ : Tuple = new_decoder_architecture
UpperCAmelCase_ : Union[str, Any] = multi_query # Ignored when new_decoder_architecture is True
UpperCAmelCase_ : List[str] = parallel_attn
UpperCAmelCase_ : int = bias
super().__init__(bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.hidden_size // self.num_attention_heads
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return not self.alibi
| 61 |
"""simple docstring"""
import os
_a = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1_000}
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : List[str] = 0
while index < len(__lowerCamelCase ) - 1:
UpperCAmelCase_ : Tuple = SYMBOLS[numerals[index]]
UpperCAmelCase_ : List[str] = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : List[str] = ""
UpperCAmelCase_ : Any = num // 1000
numerals += m_count * "M"
num %= 1000
UpperCAmelCase_ : Any = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
UpperCAmelCase_ : str = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def __a ( __lowerCamelCase = "/p089_roman.txt" ):
UpperCAmelCase_ : int = 0
with open(os.path.dirname(__lowerCamelCase ) + roman_numerals_filename ) as filea:
UpperCAmelCase_ : Optional[Any] = filea.readlines()
for line in lines:
UpperCAmelCase_ : Tuple = line.strip()
UpperCAmelCase_ : Optional[Any] = parse_roman_numerals(__lowerCamelCase )
UpperCAmelCase_ : Tuple = generate_roman_numerals(__lowerCamelCase )
savings += len(__lowerCamelCase ) - len(__lowerCamelCase )
return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 61 | 1 |
"""simple docstring"""
import os
__A = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1000}
def __A (_SCREAMING_SNAKE_CASE ) ->Dict:
"""simple docstring"""
lowerCAmelCase__ :str = 0
lowerCAmelCase__ :Any = 0
while index < len(_SCREAMING_SNAKE_CASE ) - 1:
lowerCAmelCase__ :str = SYMBOLS[numerals[index]]
lowerCAmelCase__ :int = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def __A (_SCREAMING_SNAKE_CASE ) ->Tuple:
"""simple docstring"""
lowerCAmelCase__ :Optional[Any] = ""
lowerCAmelCase__ :int = num // 1000
numerals += m_count * "M"
num %= 1000
lowerCAmelCase__ :Any = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
lowerCAmelCase__ :str = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def __A (_SCREAMING_SNAKE_CASE = "/p089_roman.txt" ) ->Tuple:
"""simple docstring"""
lowerCAmelCase__ :Dict = 0
with open(os.path.dirname(_SCREAMING_SNAKE_CASE ) + roman_numerals_filename ) as filea:
lowerCAmelCase__ :Dict = filea.readlines()
for line in lines:
lowerCAmelCase__ :List[Any] = line.strip()
lowerCAmelCase__ :Union[str, Any] = parse_roman_numerals(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Optional[Any] = generate_roman_numerals(_SCREAMING_SNAKE_CASE )
savings += len(_SCREAMING_SNAKE_CASE ) - len(_SCREAMING_SNAKE_CASE )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 370 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
__A = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
__A = TaTokenizerFast
__A = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
__A = _LazyModule(
__name__,
globals()["""__file__"""],
_import_structure,
extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
module_spec=__spec__,
)
| 254 | 0 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def _SCREAMING_SNAKE_CASE (A ) -> bool:
"""simple docstring"""
lowercase__ = int(number**0.5 )
return number == sq * sq
def _SCREAMING_SNAKE_CASE (A , A , A , A , A , A ) -> tuple[int, int]:
"""simple docstring"""
lowercase__ = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
lowercase__ = x_den * y_den * z_den
lowercase__ = gcd(A , A )
top //= hcf
bottom //= hcf
return top, bottom
def _SCREAMING_SNAKE_CASE (A = 35 ) -> int:
"""simple docstring"""
lowercase__ = set()
lowercase__ = 42
lowercase__ = Fraction(0 )
lowercase__ = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
lowercase__ = x_num * y_den + x_den * y_num
lowercase__ = x_den * y_den
lowercase__ = gcd(A , A )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase__ = add_three(
A , A , A , A , A , A )
unique_s.add(A )
# n=2
lowercase__ = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
lowercase__ = x_den * x_den * y_den * y_den
if is_sq(A ) and is_sq(A ):
lowercase__ = int(sqrt(A ) )
lowercase__ = int(sqrt(A ) )
lowercase__ = gcd(A , A )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase__ = add_three(
A , A , A , A , A , A )
unique_s.add(A )
# n=-1
lowercase__ = x_num * y_num
lowercase__ = x_den * y_num + x_num * y_den
lowercase__ = gcd(A , A )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase__ = add_three(
A , A , A , A , A , A )
unique_s.add(A )
# n=2
lowercase__ = x_num * x_num * y_num * y_num
lowercase__ = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(A ) and is_sq(A ):
lowercase__ = int(sqrt(A ) )
lowercase__ = int(sqrt(A ) )
lowercase__ = gcd(A , A )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase__ = add_three(
A , A , A , A , A , A )
unique_s.add(A )
for num, den in unique_s:
total += Fraction(A , A )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f"""{solution() = }""")
| 2 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _SCREAMING_SNAKE_CASE (A ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(A , A )
def _SCREAMING_SNAKE_CASE (A ) -> List[str]:
"""simple docstring"""
lowercase__ ,lowercase__ = emb.weight.shape
lowercase__ = nn.Linear(A , A , bias=A )
lowercase__ = emb.weight.data
return lin_layer
def _SCREAMING_SNAKE_CASE (A , A="facebook/mbart-large-en-ro" , A=False , A=False ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = torch.load(A , map_location='''cpu''' )['''model''']
remove_ignore_keys_(A )
lowercase__ = state_dict['''encoder.embed_tokens.weight'''].shape[0]
lowercase__ = MBartConfig.from_pretrained(A , vocab_size=A )
if mbart_aa and finetuned:
lowercase__ = '''relu'''
lowercase__ = state_dict['''decoder.embed_tokens.weight''']
lowercase__ = MBartForConditionalGeneration(A )
model.model.load_state_dict(A )
if finetuned:
lowercase__ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
lowerCamelCase : Any = parser.parse_args()
lowerCamelCase : List[str] = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 2 | 1 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( __snake_case , unittest.TestCase ):
_UpperCAmelCase : int = LongformerTokenizer
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : List[str] = LongformerTokenizerFast
_UpperCAmelCase : Tuple = True
def lowerCAmelCase ( self : Optional[Any]):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCamelCase : List[Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__lowerCamelCase : Union[str, Any] = dict(zip(a_ ,range(len(a_))))
__lowerCamelCase : List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__lowerCamelCase : List[Any] = {'''unk_token''': '''<unk>'''}
__lowerCamelCase : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'])
__lowerCamelCase : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file ,'w' ,encoding='utf-8') as fp:
fp.write(json.dumps(a_) + '\n')
with open(self.merges_file ,'w' ,encoding='utf-8') as fp:
fp.write('\n'.join(a_))
def lowerCAmelCase ( self : Tuple ,**SCREAMING_SNAKE_CASE__ : Dict):
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**a_)
def lowerCAmelCase ( self : Dict ,**SCREAMING_SNAKE_CASE__ : int):
kwargs.update(self.special_tokens_map)
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**a_)
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Any):
__lowerCamelCase : str = '''lower newer'''
__lowerCamelCase : int = '''lower newer'''
return input_text, output_text
def lowerCAmelCase ( self : str):
__lowerCamelCase : Dict = self.tokenizer_class(self.vocab_file ,self.merges_file ,**self.special_tokens_map)
__lowerCamelCase : Any = '''lower newer'''
__lowerCamelCase : Optional[Any] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__lowerCamelCase : List[str] = tokenizer.tokenize(a_) # , add_prefix_space=True)
self.assertListEqual(a_ ,a_)
__lowerCamelCase : List[str] = tokens + [tokenizer.unk_token]
__lowerCamelCase : int = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_) ,a_)
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : int = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' ,add_special_tokens=a_) ,[0, 3_1_4_1_4, 2_3_2, 3_2_8, 2])
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' ,add_special_tokens=a_) ,[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] ,)
@slow
def lowerCAmelCase ( self : List[str]):
__lowerCamelCase : Dict = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096')
__lowerCamelCase : List[Any] = tokenizer.encode('sequence builders' ,add_special_tokens=a_)
__lowerCamelCase : Any = tokenizer.encode('multi-sequence build' ,add_special_tokens=a_)
__lowerCamelCase : int = tokenizer.encode(
'sequence builders' ,add_special_tokens=a_ ,add_prefix_space=a_)
__lowerCamelCase : int = tokenizer.encode(
'sequence builders' ,'multi-sequence build' ,add_special_tokens=a_ ,add_prefix_space=a_)
__lowerCamelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(a_)
__lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(a_ ,a_)
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase ( self : Any):
__lowerCamelCase : Optional[Any] = self.get_tokenizer()
__lowerCamelCase : Optional[int] = '''Encode this sequence.'''
__lowerCamelCase : int = tokenizer.byte_encoder[''' '''.encode('utf-8')[0]]
# Testing encoder arguments
__lowerCamelCase : List[str] = tokenizer.encode(a_ ,add_special_tokens=a_ ,add_prefix_space=a_)
__lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[0])[0]
self.assertNotEqual(a_ ,a_)
__lowerCamelCase : List[Any] = tokenizer.encode(a_ ,add_special_tokens=a_ ,add_prefix_space=a_)
__lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[0])[0]
self.assertEqual(a_ ,a_)
tokenizer.add_special_tokens({'bos_token': '<s>'})
__lowerCamelCase : Optional[int] = tokenizer.encode(a_ ,add_special_tokens=a_)
__lowerCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[1])[0]
self.assertNotEqual(a_ ,a_)
# Testing spaces after special tokens
__lowerCamelCase : List[str] = '''<mask>'''
tokenizer.add_special_tokens(
{'mask_token': AddedToken(a_ ,lstrip=a_ ,rstrip=a_)}) # mask token has a left space
__lowerCamelCase : Tuple = tokenizer.convert_tokens_to_ids(a_)
__lowerCamelCase : int = '''Encode <mask> sequence'''
__lowerCamelCase : Optional[int] = '''Encode <mask>sequence'''
__lowerCamelCase : str = tokenizer.encode(a_)
__lowerCamelCase : Any = encoded.index(a_)
__lowerCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
self.assertEqual(a_ ,a_)
__lowerCamelCase : Optional[int] = tokenizer.encode(a_)
__lowerCamelCase : Optional[Any] = encoded.index(a_)
__lowerCamelCase : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
self.assertNotEqual(a_ ,a_)
def lowerCAmelCase ( self : int):
pass
def lowerCAmelCase ( self : int):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
__lowerCamelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(a_ ,**a_)
__lowerCamelCase : str = self.tokenizer_class.from_pretrained(a_ ,**a_)
__lowerCamelCase : Tuple = '''A, <mask> AllenNLP sentence.'''
__lowerCamelCase : Union[str, Any] = tokenizer_r.encode_plus(a_ ,add_special_tokens=a_ ,return_token_type_ids=a_)
__lowerCamelCase : str = tokenizer_p.encode_plus(a_ ,add_special_tokens=a_ ,return_token_type_ids=a_)
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids']) ,sum(tokens_p['token_type_ids']))
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']) ,sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']) ,)
__lowerCamelCase : Any = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
__lowerCamelCase : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] ,[0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
self.assertSequenceEqual(tokens_r['input_ids'] ,[0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
self.assertSequenceEqual(
a_ ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
self.assertSequenceEqual(
a_ ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
def lowerCAmelCase ( self : str):
for trim_offsets, add_prefix_space in itertools.product([True, False] ,repeat=2):
__lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname ,use_fast=a_ ,add_prefix_space=a_ ,trim_offsets=a_)
__lowerCamelCase : Dict = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
__lowerCamelCase : int = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
self.assertEqual(pre_tokenizer_state['add_prefix_space'] ,a_)
self.assertEqual(post_processor_state['add_prefix_space'] ,a_)
self.assertEqual(post_processor_state['trim_offsets'] ,a_)
def lowerCAmelCase ( self : Tuple):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
__lowerCamelCase : List[Any] = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__lowerCamelCase : List[str] = F"{text_of_1_token} {text_of_1_token}"
__lowerCamelCase : str = self.rust_tokenizer_class.from_pretrained(
a_ ,use_fast=a_ ,add_prefix_space=a_ ,trim_offsets=a_)
__lowerCamelCase : List[Any] = tokenizer_r(a_ ,return_offsets_mapping=a_ ,add_special_tokens=a_)
self.assertEqual(encoding.offset_mapping[0] ,(0, len(a_)))
self.assertEqual(
encoding.offset_mapping[1] ,(len(a_) + 1, len(a_) + 1 + len(a_)) ,)
__lowerCamelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
a_ ,use_fast=a_ ,add_prefix_space=a_ ,trim_offsets=a_)
__lowerCamelCase : Any = tokenizer_r(a_ ,return_offsets_mapping=a_ ,add_special_tokens=a_)
self.assertEqual(encoding.offset_mapping[0] ,(0, len(a_)))
self.assertEqual(
encoding.offset_mapping[1] ,(len(a_) + 1, len(a_) + 1 + len(a_)) ,)
__lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(
a_ ,use_fast=a_ ,add_prefix_space=a_ ,trim_offsets=a_)
__lowerCamelCase : Optional[int] = tokenizer_r(a_ ,return_offsets_mapping=a_ ,add_special_tokens=a_)
self.assertEqual(encoding.offset_mapping[0] ,(0, len(a_)))
self.assertEqual(
encoding.offset_mapping[1] ,(len(a_), len(a_) + 1 + len(a_)) ,)
__lowerCamelCase : Any = self.rust_tokenizer_class.from_pretrained(
a_ ,use_fast=a_ ,add_prefix_space=a_ ,trim_offsets=a_)
__lowerCamelCase : Optional[int] = tokenizer_r(a_ ,return_offsets_mapping=a_ ,add_special_tokens=a_)
self.assertEqual(encoding.offset_mapping[0] ,(0, len(a_)))
self.assertEqual(
encoding.offset_mapping[1] ,(len(a_), len(a_) + 1 + len(a_)) ,)
__lowerCamelCase : List[Any] = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__lowerCamelCase : List[str] = self.rust_tokenizer_class.from_pretrained(
a_ ,use_fast=a_ ,add_prefix_space=a_ ,trim_offsets=a_)
__lowerCamelCase : Tuple = tokenizer_r(a_ ,return_offsets_mapping=a_ ,add_special_tokens=a_)
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(a_)))
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(a_) + 1, 1 + len(a_) + 1 + len(a_)) ,)
__lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
a_ ,use_fast=a_ ,add_prefix_space=a_ ,trim_offsets=a_)
__lowerCamelCase : Optional[Any] = tokenizer_r(a_ ,return_offsets_mapping=a_ ,add_special_tokens=a_)
self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(a_)))
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(a_), 1 + len(a_) + 1 + len(a_)) ,)
__lowerCamelCase : Tuple = self.rust_tokenizer_class.from_pretrained(
a_ ,use_fast=a_ ,add_prefix_space=a_ ,trim_offsets=a_)
__lowerCamelCase : int = tokenizer_r(a_ ,return_offsets_mapping=a_ ,add_special_tokens=a_)
self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(a_)))
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(a_), 1 + len(a_) + 1 + len(a_)) ,)
| 352 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a ={
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 113 | 0 |
def a__ ( A_, A_ ):
'''simple docstring'''
return int((input_a, input_a).count(1 ) != 0 )
def a__ ( ):
'''simple docstring'''
assert or_gate(0, 0 ) == 0
assert or_gate(0, 1 ) == 1
assert or_gate(1, 0 ) == 1
assert or_gate(1, 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 88 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_snake_case : Union[str, Any] = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : int = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_snake_case : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 292 | 0 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ) -> List[Any]:
_lowerCAmelCase : List[Any] = FileLock(str(tmpdir / """foo.lock""" ) )
_lowerCAmelCase : int = FileLock(str(tmpdir / """foo.lock""" ) )
_lowerCAmelCase : int = 0.01
with locka.acquire():
with pytest.raises(_lowerCamelCase ):
_lowerCAmelCase : Dict = time.time()
locka.acquire(_lowerCamelCase )
assert time.time() - _start > timeout
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> Optional[Any]:
_lowerCAmelCase : str = """a""" * 1000 + """.lock"""
_lowerCAmelCase : Any = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(_lowerCamelCase )
assert len(os.path.basename(locka._lock_file ) ) <= 255
_lowerCAmelCase : Dict = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(_lowerCamelCase ):
locka.acquire(0 )
| 126 | """simple docstring"""
import socket
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : Optional[int] = socket.socket(socket.AF_INET ,socket.SOCK_STREAM )
_lowerCAmelCase : Optional[int] = socket.gethostname()
_lowerCAmelCase : Tuple = 12312
sock.connect((host, port) )
sock.send(b"""Hello server!""" )
with open("""Received_file""" ,"""wb""" ) as out_file:
print("""File opened""" )
print("""Receiving data...""" )
while True:
_lowerCAmelCase : List[Any] = sock.recv(1024 )
if not data:
break
out_file.write(_lowerCamelCase )
print("""Successfully received the file""" )
sock.close()
print("""Connection closed""" )
if __name__ == "__main__":
main()
| 126 | 1 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : int = {'vocab_file': 'vocab.json'}
__A : List[str] = {
'vocab_file': {
'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
}
}
__A : Tuple = {'mgp-str': 27}
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
_UpperCamelCase:Tuple = VOCAB_FILES_NAMES
_UpperCamelCase:List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase:Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="[GO]" , _SCREAMING_SNAKE_CASE="[GO]" , _SCREAMING_SNAKE_CASE="[s]" , _SCREAMING_SNAKE_CASE="[GO]" , **_SCREAMING_SNAKE_CASE )-> Dict:
super().__init__(
unk_token=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
with open(_SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as vocab_handle:
lowerCamelCase_ =json.load(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ ={v: k for k, v in self.vocab.items()}
@property
def _snake_case ( self )-> Optional[Any]:
return len(self.vocab )
def _snake_case ( self )-> str:
return dict(self.vocab , **self.added_tokens_encoder )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> List[Any]:
lowerCamelCase_ =[]
for s in text:
char_tokens.extend(_SCREAMING_SNAKE_CASE )
return char_tokens
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Optional[int]:
return self.vocab.get(_SCREAMING_SNAKE_CASE , self.vocab.get(self.unk_token ) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Optional[int]:
return self.decoder.get(_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )-> Tuple[str]:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error("""Vocabulary path ({}) should be a directory""".format(_SCREAMING_SNAKE_CASE ) )
return
lowerCamelCase_ =os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE ) + """\n""" )
return (vocab_file,)
| 154 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__A : Dict = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )-> None:
warnings.warn(
"""The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DeiTImageProcessor instead.""" , _SCREAMING_SNAKE_CASE , )
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
| 154 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
lowercase_ = logging.get_logger(__name__)
lowercase_ = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
lowercase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
__snake_case : Any = model_type_to_module_name(__SCREAMING_SNAKE_CASE )
__snake_case : Optional[int] = importlib.import_module(F'''.{module_name}''' , """transformers.models""" )
try:
return getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(__SCREAMING_SNAKE_CASE , """__name__""" , __SCREAMING_SNAKE_CASE ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__snake_case : str = importlib.import_module("""transformers""" )
if hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return None
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , __SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[Dict[str, str]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[bool, str]] = None , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : bool = False , **__SCREAMING_SNAKE_CASE : Optional[Any] , ):
'''simple docstring'''
__snake_case : Optional[int] = get_file_from_repo(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , )
if resolved_config_file is None:
logger.info(
"""Could not locate the image processor configuration file, will try to use the model config instead.""" )
return {}
with open(__SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as reader:
return json.load(__SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE__ :
def __init__( self : str ):
raise EnvironmentError(
"""AutoImageProcessor is designed to be instantiated """
"""using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
@replace_list_option_in_docstrings(_lowerCAmelCase )
def snake_case__ ( cls : Optional[Any] , _lowerCAmelCase : List[Any] , **_lowerCAmelCase : str ):
__snake_case : int = kwargs.pop("""config""" , _lowerCAmelCase )
__snake_case : str = kwargs.pop("""trust_remote_code""" , _lowerCAmelCase )
__snake_case : Any = True
__snake_case , __snake_case : Optional[Any] = ImageProcessingMixin.get_image_processor_dict(_lowerCAmelCase , **_lowerCAmelCase )
__snake_case : List[str] = config_dict.get("""image_processor_type""" , _lowerCAmelCase )
__snake_case : Union[str, Any] = None
if "AutoImageProcessor" in config_dict.get("""auto_map""" , {} ):
__snake_case : List[str] = config_dict["""auto_map"""]["""AutoImageProcessor"""]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
__snake_case : int = config_dict.pop("""feature_extractor_type""" , _lowerCAmelCase )
if feature_extractor_class is not None:
logger.warning(
"""Could not find image processor class in the image processor config or the model config. Loading"""
""" based on pattern matching with the model's feature extractor configuration.""" )
__snake_case : List[Any] = feature_extractor_class.replace("""FeatureExtractor""" , """ImageProcessor""" )
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
__snake_case : Tuple = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
__snake_case : Optional[Any] = feature_extractor_auto_map.replace("""FeatureExtractor""" , """ImageProcessor""" )
logger.warning(
"""Could not find image processor auto map in the image processor config or the model config."""
""" Loading based on pattern matching with the model's feature extractor configuration.""" )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__snake_case : List[Any] = AutoConfig.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
# It could be in `config.image_processor_type``
__snake_case : Dict = getattr(_lowerCAmelCase , """image_processor_type""" , _lowerCAmelCase )
if hasattr(_lowerCAmelCase , """auto_map""" ) and "AutoImageProcessor" in config.auto_map:
__snake_case : Tuple = config.auto_map["""AutoImageProcessor"""]
if image_processor_class is not None:
__snake_case : Optional[int] = image_processor_class_from_name(_lowerCAmelCase )
__snake_case : Optional[Any] = image_processor_auto_map is not None
__snake_case : Optional[Any] = image_processor_class is not None or type(_lowerCAmelCase ) in IMAGE_PROCESSOR_MAPPING
__snake_case : List[str] = resolve_trust_remote_code(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if has_remote_code and trust_remote_code:
__snake_case : Any = get_class_from_dynamic_module(
_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
__snake_case : Union[str, Any] = kwargs.pop("""code_revision""" , _lowerCAmelCase )
if os.path.isdir(_lowerCAmelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_lowerCAmelCase , **_lowerCAmelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(_lowerCAmelCase , **_lowerCAmelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_lowerCAmelCase ) in IMAGE_PROCESSOR_MAPPING:
__snake_case : Optional[Any] = IMAGE_PROCESSOR_MAPPING[type(_lowerCAmelCase )]
return image_processor_class.from_dict(_lowerCAmelCase , **_lowerCAmelCase )
raise ValueError(
f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def snake_case__ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple ):
IMAGE_PROCESSOR_MAPPING.register(_lowerCAmelCase , _lowerCAmelCase )
| 20 | import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def snake_case__ ( self : Any ):
__snake_case : Dict = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__snake_case : str = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
__snake_case : List[str] = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__snake_case : str = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_60_00,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__snake_case : Optional[Any] = tempfile.mkdtemp()
__snake_case : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__snake_case : Any = os.path.join(self.tmpdirname , _lowerCAmelCase )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
# load decoder from hub
__snake_case : Optional[int] = """hf-internal-testing/ngram-beam-search-decoder"""
def snake_case__ ( self : Optional[Any] , **_lowerCAmelCase : Tuple ):
__snake_case : int = self.add_kwargs_tokens_map.copy()
kwargs.update(_lowerCAmelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def snake_case__ ( self : Union[str, Any] , **_lowerCAmelCase : Optional[int] ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def snake_case__ ( self : Dict , **_lowerCAmelCase : Tuple ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_lowerCAmelCase )
def snake_case__ ( self : List[str] ):
shutil.rmtree(self.tmpdirname )
def snake_case__ ( self : Union[str, Any] ):
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : Tuple = self.get_feature_extractor()
__snake_case : Dict = self.get_decoder()
__snake_case : List[str] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
__snake_case : Dict = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCAmelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _lowerCAmelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _lowerCAmelCase )
def snake_case__ ( self : Tuple ):
__snake_case : Tuple = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__snake_case : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def snake_case__ ( self : int ):
__snake_case : Tuple = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(_lowerCAmelCase , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=_lowerCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def snake_case__ ( self : Dict ):
__snake_case : int = self.get_feature_extractor()
__snake_case : str = self.get_tokenizer()
__snake_case : Dict = self.get_decoder()
__snake_case : Any = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : List[Any] = floats_list((3, 10_00) )
__snake_case : Optional[Any] = feature_extractor(_lowerCAmelCase , return_tensors="""np""" )
__snake_case : Tuple = processor(_lowerCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case__ ( self : Optional[int] ):
__snake_case : Any = self.get_feature_extractor()
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : int = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : Optional[int] = """This is a test string"""
__snake_case : Union[str, Any] = processor(text=_lowerCAmelCase )
__snake_case : Dict = tokenizer(_lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case__ ( self : List[str] , _lowerCAmelCase : List[Any]=(2, 10, 16) , _lowerCAmelCase : str=77 ):
np.random.seed(_lowerCAmelCase )
return np.random.rand(*_lowerCAmelCase )
def snake_case__ ( self : Tuple ):
__snake_case : List[str] = self.get_feature_extractor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : List[str] = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : Optional[int] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__snake_case : int = processor.decode(_lowerCAmelCase )
__snake_case : Optional[int] = decoder.decode_beams(_lowerCAmelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def snake_case__ ( self : List[str] , _lowerCAmelCase : List[str] ):
__snake_case : int = self.get_feature_extractor()
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : int = self.get_decoder()
__snake_case : Dict = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : int = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__snake_case : Tuple = processor.batch_decode(_lowerCAmelCase )
else:
with get_context(_lowerCAmelCase ).Pool() as pool:
__snake_case : int = processor.batch_decode(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : int = list(_lowerCAmelCase )
with get_context("""fork""" ).Pool() as p:
__snake_case : Tuple = decoder.decode_beams_batch(_lowerCAmelCase , _lowerCAmelCase )
__snake_case , __snake_case , __snake_case : List[Any] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_lowerCAmelCase , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(_lowerCAmelCase , decoded_processor.logit_score )
self.assertListEqual(_lowerCAmelCase , decoded_processor.lm_score )
def snake_case__ ( self : Optional[int] ):
__snake_case : Optional[Any] = self.get_feature_extractor()
__snake_case : int = self.get_tokenizer()
__snake_case : str = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : int = self._get_dummy_logits()
__snake_case : List[str] = 15
__snake_case : Optional[Any] = -20.0
__snake_case : Tuple = -4.0
__snake_case : List[Any] = processor.batch_decode(
_lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , )
__snake_case : List[str] = decoded_processor_out.text
__snake_case : str = list(_lowerCAmelCase )
with get_context("""fork""" ).Pool() as pool:
__snake_case : Dict = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , )
__snake_case : int = [d[0][0] for d in decoded_decoder_out]
__snake_case : List[Any] = [d[0][2] for d in decoded_decoder_out]
__snake_case : List[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , _lowerCAmelCase )
self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _lowerCAmelCase , atol=1e-3 ) )
self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , _lowerCAmelCase , atol=1e-3 ) )
def snake_case__ ( self : Any ):
__snake_case : List[Any] = self.get_feature_extractor()
__snake_case : Any = self.get_tokenizer()
__snake_case : Union[str, Any] = self.get_decoder()
__snake_case : Dict = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : Any = self._get_dummy_logits()
__snake_case : Any = 2.0
__snake_case : int = 5.0
__snake_case : Optional[int] = -20.0
__snake_case : Optional[int] = True
__snake_case : Any = processor.batch_decode(
_lowerCAmelCase , alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
__snake_case : str = decoded_processor_out.text
__snake_case : int = list(_lowerCAmelCase )
decoder.reset_params(
alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
with get_context("""fork""" ).Pool() as pool:
__snake_case : Tuple = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , )
__snake_case : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , _lowerCAmelCase )
__snake_case : List[str] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _lowerCAmelCase )
def snake_case__ ( self : Dict ):
__snake_case : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
__snake_case : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__snake_case : Union[str, Any] = os.listdir(_lowerCAmelCase )
__snake_case : List[str] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Optional[Any] ):
__snake_case : Union[str, Any] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__snake_case : Dict = WavaVecaProcessorWithLM.from_pretrained(_lowerCAmelCase )
__snake_case : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
__snake_case : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__snake_case : List[str] = os.listdir(_lowerCAmelCase )
__snake_case : List[Any] = os.listdir(_lowerCAmelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Optional[Any] ):
__snake_case : Optional[int] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : str = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Optional[int] = floats_list((3, 10_00) )
__snake_case : Union[str, Any] = processor_wavaveca(_lowerCAmelCase , return_tensors="""np""" )
__snake_case : Union[str, Any] = processor_auto(_lowerCAmelCase , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__snake_case : Dict = self._get_dummy_logits()
__snake_case : List[Any] = processor_wavaveca.batch_decode(_lowerCAmelCase )
__snake_case : List[Any] = processor_auto.batch_decode(_lowerCAmelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def snake_case__ ( self : str ):
__snake_case : int = self.get_feature_extractor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : Optional[Any] = self.get_decoder()
__snake_case : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def snake_case__ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
__snake_case : Union[str, Any] = [d[key] for d in offsets]
return retrieved_list
def snake_case__ ( self : Dict ):
__snake_case : int = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : List[str] = self._get_dummy_logits()[0]
__snake_case : str = processor.decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def snake_case__ ( self : List[str] ):
__snake_case : Any = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Optional[int] = self._get_dummy_logits()
__snake_case : int = processor.batch_decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def snake_case__ ( self : Optional[Any] ):
import torch
__snake_case : Optional[Any] = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=_lowerCAmelCase )
__snake_case : Any = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_60_00 ) )
__snake_case : List[Any] = iter(_lowerCAmelCase )
__snake_case : Optional[int] = next(_lowerCAmelCase )
__snake_case : str = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__snake_case : str = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case : List[str] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__snake_case : Dict = model(_lowerCAmelCase ).logits.cpu().numpy()
__snake_case : Any = processor.decode(logits[0] , output_word_offsets=_lowerCAmelCase )
__snake_case : Optional[Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__snake_case : Dict = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__snake_case : Dict = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , _lowerCAmelCase )
self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , output.text )
# output times
__snake_case : Dict = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """start_time""" ) )
__snake_case : Optional[Any] = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """end_time""" ) )
# fmt: off
__snake_case : Optional[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__snake_case : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
| 20 | 1 |
import random
from typing import Any
def lowerCamelCase__ ( _a):
for _ in range(len(_a)):
SCREAMING_SNAKE_CASE : Tuple = random.randint(0 , len(_a) - 1)
SCREAMING_SNAKE_CASE : Union[str, Any] = random.randint(0 , len(_a) - 1)
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = data[b], data[a]
return data
if __name__ == "__main__":
a_ = [0, 1, 2, 3, 4, 5, 6, 7]
a_ = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 76 |
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
snake_case_ : str = logging.getLogger(__name__)
def A__ ( ):
_UpperCamelCase : List[Any] = argparse.ArgumentParser(
description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
parser.add_argument('--file_path' , type=UpperCAmelCase_ , default='data/dump.txt' , help='The path to the data.' )
parser.add_argument('--tokenizer_type' , type=UpperCAmelCase_ , default='bert' , choices=['bert', 'roberta', 'gpt2'] )
parser.add_argument('--tokenizer_name' , type=UpperCAmelCase_ , default='bert-base-uncased' , help='The tokenizer to use.' )
parser.add_argument('--dump_file' , type=UpperCAmelCase_ , default='data/dump' , help='The dump file prefix.' )
_UpperCamelCase : Any = parser.parse_args()
logger.info(f'Loading Tokenizer ({args.tokenizer_name})' )
if args.tokenizer_type == "bert":
_UpperCamelCase : Optional[int] = BertTokenizer.from_pretrained(args.tokenizer_name )
_UpperCamelCase : Optional[int] = tokenizer.special_tokens_map['cls_token'] # `[CLS]`
_UpperCamelCase : Dict = tokenizer.special_tokens_map['sep_token'] # `[SEP]`
elif args.tokenizer_type == "roberta":
_UpperCamelCase : List[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
_UpperCamelCase : Any = tokenizer.special_tokens_map['cls_token'] # `<s>`
_UpperCamelCase : int = tokenizer.special_tokens_map['sep_token'] # `</s>`
elif args.tokenizer_type == "gpt2":
_UpperCamelCase : Optional[int] = GPTaTokenizer.from_pretrained(args.tokenizer_name )
_UpperCamelCase : Optional[Any] = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>`
_UpperCamelCase : Any = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>`
logger.info(f'Loading text from {args.file_path}' )
with open(args.file_path , 'r' , encoding='utf8' ) as fp:
_UpperCamelCase : List[Any] = fp.readlines()
logger.info('Start encoding' )
logger.info(f'{len(UpperCAmelCase_ )} examples to process.' )
_UpperCamelCase : int = []
_UpperCamelCase : Any = 0
_UpperCamelCase : Any = 1_0_0_0_0
_UpperCamelCase : Optional[Any] = time.time()
for text in data:
_UpperCamelCase : List[Any] = f'{bos} {text.strip()} {sep}'
_UpperCamelCase : Any = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
rslt.append(UpperCAmelCase_ )
iter += 1
if iter % interval == 0:
_UpperCamelCase : Union[str, Any] = time.time()
logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
_UpperCamelCase : Tuple = time.time()
logger.info('Finished binarization' )
logger.info(f'{len(UpperCAmelCase_ )} examples processed.' )
_UpperCamelCase : Optional[int] = f'{args.dump_file}.{args.tokenizer_name}.pickle'
_UpperCamelCase : List[str] = tokenizer.vocab_size
if vocab_size < (1 << 1_6):
_UpperCamelCase : List[Any] = [np.uintaa(UpperCAmelCase_ ) for d in rslt]
else:
_UpperCamelCase : Any = [np.intaa(UpperCAmelCase_ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f'Dump to {dp_file}' )
with open(UpperCAmelCase_ , 'wb' ) as handle:
pickle.dump(rslt_ , UpperCAmelCase_ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 83 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase : str = {
"configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[int] = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
UpperCamelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 365 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : Dict = {
"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = "wavlm"
def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1E-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=320 , __UpperCAmelCase=800 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.0_5 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=320 , __UpperCAmelCase=2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=100 , __UpperCAmelCase=256 , __UpperCAmelCase=256 , __UpperCAmelCase=0.1 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=(512, 512, 512, 512, 1500) , __UpperCAmelCase=(5, 3, 3, 1, 1) , __UpperCAmelCase=(1, 2, 3, 1, 1) , __UpperCAmelCase=512 , __UpperCAmelCase=80 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=False , __UpperCAmelCase=3 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=None , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase )
__UpperCamelCase = hidden_size
__UpperCamelCase = feat_extract_norm
__UpperCamelCase = feat_extract_activation
__UpperCamelCase = list(__UpperCAmelCase )
__UpperCamelCase = list(__UpperCAmelCase )
__UpperCamelCase = list(__UpperCAmelCase )
__UpperCamelCase = conv_bias
__UpperCamelCase = num_buckets
__UpperCamelCase = max_bucket_distance
__UpperCamelCase = num_conv_pos_embeddings
__UpperCamelCase = num_conv_pos_embedding_groups
__UpperCamelCase = len(self.conv_dim )
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = activation_dropout
__UpperCamelCase = feat_proj_dropout
__UpperCamelCase = final_dropout
__UpperCamelCase = layerdrop
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = initializer_range
__UpperCamelCase = num_ctc_classes
__UpperCamelCase = vocab_size
__UpperCamelCase = do_stable_layer_norm
__UpperCamelCase = use_weighted_layer_sum
__UpperCamelCase = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCamelCase = apply_spec_augment
__UpperCamelCase = mask_time_prob
__UpperCamelCase = mask_time_length
__UpperCamelCase = mask_time_min_masks
__UpperCamelCase = mask_feature_prob
__UpperCamelCase = mask_feature_length
# parameters for pretraining with codevector quantized representations
__UpperCamelCase = num_codevectors_per_group
__UpperCamelCase = num_codevector_groups
__UpperCamelCase = contrastive_logits_temperature
__UpperCamelCase = num_negatives
__UpperCamelCase = codevector_dim
__UpperCamelCase = proj_codevector_dim
__UpperCamelCase = diversity_loss_weight
# ctc loss
__UpperCamelCase = ctc_loss_reduction
__UpperCamelCase = ctc_zero_infinity
# adapter
__UpperCamelCase = add_adapter
__UpperCamelCase = adapter_kernel_size
__UpperCamelCase = adapter_stride
__UpperCamelCase = num_adapter_layers
__UpperCamelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__UpperCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__UpperCamelCase = list(__UpperCAmelCase )
__UpperCamelCase = list(__UpperCAmelCase )
__UpperCamelCase = list(__UpperCAmelCase )
__UpperCamelCase = xvector_output_dim
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 263 | 0 |
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = """xlm-prophetnet"""
SCREAMING_SNAKE_CASE__ : str = ["""past_key_values"""]
SCREAMING_SNAKE_CASE__ : Dict = {
"""num_attention_heads""": """num_encoder_attention_heads""",
}
def __init__( self , lowercase_ = 0.1 , lowercase_ = "gelu" , lowercase_ = 3_0522 , lowercase_ = 1024 , lowercase_ = 4096 , lowercase_ = 12 , lowercase_ = 16 , lowercase_ = 4096 , lowercase_ = 12 , lowercase_ = 16 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 512 , lowercase_ = 0.02 , lowercase_ = True , lowercase_ = True , lowercase_ = 0 , lowercase_ = 2 , lowercase_ = 32 , lowercase_ = 128 , lowercase_ = False , lowercase_ = 0.0 , lowercase_ = True , lowercase_ = 0 , lowercase_ = 1 , lowercase_ = 2 , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = vocab_size
UpperCAmelCase_ : Tuple = hidden_size
UpperCAmelCase_ : Any = encoder_ffn_dim
UpperCAmelCase_ : Union[str, Any] = num_encoder_layers
UpperCAmelCase_ : Union[str, Any] = num_encoder_attention_heads
UpperCAmelCase_ : List[str] = decoder_ffn_dim
UpperCAmelCase_ : List[Any] = num_decoder_layers
UpperCAmelCase_ : List[str] = num_decoder_attention_heads
UpperCAmelCase_ : List[Any] = max_position_embeddings
UpperCAmelCase_ : List[Any] = init_std # Normal(0, this parameter)
UpperCAmelCase_ : Tuple = activation_function
# parameters for xlmprophetnet
UpperCAmelCase_ : List[str] = ngram
UpperCAmelCase_ : Any = num_buckets
UpperCAmelCase_ : List[str] = relative_max_distance
UpperCAmelCase_ : Tuple = disable_ngram_loss
UpperCAmelCase_ : Union[str, Any] = eps
# 3 Types of Dropout
UpperCAmelCase_ : Union[str, Any] = attention_dropout
UpperCAmelCase_ : Union[str, Any] = activation_dropout
UpperCAmelCase_ : List[Any] = dropout
UpperCAmelCase_ : Dict = use_cache
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , add_cross_attention=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
" `num_decoder_layers`." )
| 61 |
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
lowerCAmelCase = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True ):
"""simple docstring"""
if model_type not in MODEL_CLASSES:
raise ValueError(f'Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.' )
lowercase__ , lowercase__ , lowercase__ , lowercase__ = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
lowercase__ = cached_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
lowercase__ = config_class.from_json_file(SCREAMING_SNAKE_CASE )
lowercase__ = True
lowercase__ = True
print(f'Building TensorFlow model from configuration: {config}' )
lowercase__ = model_class(SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
lowercase__ = cached_file(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
lowercase__ = load_pytorch_checkpoint_in_tfa_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if compare_with_pt_model:
lowercase__ = tf_model(tf_model.dummy_inputs , training=SCREAMING_SNAKE_CASE ) # build the network
lowercase__ = torch.load(SCREAMING_SNAKE_CASE , map_location='''cpu''' )
lowercase__ = pt_model_class.from_pretrained(
pretrained_model_name_or_path=SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE , state_dict=SCREAMING_SNAKE_CASE )
with torch.no_grad():
lowercase__ = pt_model(**pt_model.dummy_inputs )
lowercase__ = pto[0].numpy()
lowercase__ = tfo[0].numpy()
lowercase__ = np.amax(np.abs(np_pt - np_tf ) )
print(f'Max absolute difference between models outputs {diff}' )
assert diff <= 2E-2, f'Error, model absolute difference is >2e-2: {diff}'
# Save pytorch-model
print(f'Save TensorFlow model to {tf_dump_path}' )
tf_model.save_weights(SCREAMING_SNAKE_CASE , save_format='''h5''' )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , ):
"""simple docstring"""
if args_model_type is None:
lowercase__ = list(MODEL_CLASSES.keys() )
else:
lowercase__ = [args_model_type]
for j, model_type in enumerate(SCREAMING_SNAKE_CASE , start=1 ):
print('''=''' * 1_00 )
print(f' Converting model type {j}/{len(SCREAMING_SNAKE_CASE )}: {model_type}' )
print('''=''' * 1_00 )
if model_type not in MODEL_CLASSES:
raise ValueError(f'Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.' )
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
lowercase__ = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
lowercase__ = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , start=1 ):
print('''-''' * 1_00 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f' Skipping finetuned checkpoint {model_shortcut_name}' )
continue
lowercase__ = model_shortcut_name
elif only_convert_finetuned_models:
print(f' Skipping not finetuned checkpoint {model_shortcut_name}' )
continue
print(
f' Converting checkpoint {i}/{len(SCREAMING_SNAKE_CASE )}: {model_shortcut_name} - model_type {model_type}' )
print('''-''' * 1_00 )
if config_shortcut_name in aws_config_map:
lowercase__ = cached_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
else:
lowercase__ = config_shortcut_name
if model_shortcut_name in aws_model_maps:
lowercase__ = cached_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
else:
lowercase__ = model_shortcut_name
if os.path.isfile(SCREAMING_SNAKE_CASE ):
lowercase__ = '''converted_model'''
convert_pt_checkpoint_to_tf(
model_type=SCREAMING_SNAKE_CASE , pytorch_checkpoint_path=SCREAMING_SNAKE_CASE , config_file=SCREAMING_SNAKE_CASE , tf_dump_path=os.path.join(SCREAMING_SNAKE_CASE , model_shortcut_name + '''-tf_model.h5''' ) , compare_with_pt_model=SCREAMING_SNAKE_CASE , )
if remove_cached_files:
os.remove(SCREAMING_SNAKE_CASE )
os.remove(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
lowerCAmelCase = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 110 | 0 |
def __UpperCAmelCase ( __lowerCamelCase ) -> Dict:
lowercase__ : Any = len(__lowerCamelCase )
lowercase__ : Tuple = sum(__lowerCamelCase )
lowercase__ : Any = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
lowercase__ : Optional[int] = True
for i in range(1 , s + 1 ):
lowercase__ : List[str] = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
lowercase__ : Any = dp[i][j - 1]
if arr[i - 1] <= j:
lowercase__ : Optional[Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
lowercase__ : int = s - 2 * j
break
return diff
| 355 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCAmelCase_ = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
lowerCAmelCase_ = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
lowerCAmelCase_ = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='''https://github.com/krishnap25/mauve''' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Value('''string''' ,id='''sequence''' ),
} ) ,codebase_urls=['''https://github.com/krishnap25/mauve'''] ,reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] ,)
def UpperCAmelCase ( self : Dict ,_snake_case : Optional[Any] ,_snake_case : Any ,_snake_case : List[str]=None ,_snake_case : Tuple=None ,_snake_case : List[Any]=None ,_snake_case : Any=None ,_snake_case : Optional[int]="auto" ,_snake_case : Optional[int]=-1 ,_snake_case : Optional[int]=0.9 ,_snake_case : Any=5 ,_snake_case : Dict=500 ,_snake_case : Optional[int]="gpt2-large" ,_snake_case : Optional[Any]=-1 ,_snake_case : Tuple=1_024 ,_snake_case : Optional[int]=25 ,_snake_case : Dict=5 ,_snake_case : int=True ,_snake_case : Union[str, Any]=25 ,) -> Any:
"""simple docstring"""
lowercase__ : Any = compute_mauve(
p_text=_snake_case ,q_text=_snake_case ,p_features=_snake_case ,q_features=_snake_case ,p_tokens=_snake_case ,q_tokens=_snake_case ,num_buckets=_snake_case ,pca_max_data=_snake_case ,kmeans_explained_var=_snake_case ,kmeans_num_redo=_snake_case ,kmeans_max_iter=_snake_case ,featurize_model_name=_snake_case ,device_id=_snake_case ,max_text_length=_snake_case ,divergence_curve_discretization_size=_snake_case ,mauve_scaling_factor=_snake_case ,verbose=_snake_case ,seed=_snake_case ,)
return out
| 302 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.